max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
regulations/tests/runner.py | cmc333333/regulations-site | 0 | 6619951 | <reponame>cmc333333/regulations-site
from django_nose.runner import NoseTestSuiteRunner
class DatabaselessTestRunner(NoseTestSuiteRunner):
""" A test suite runner that does not setup and tear down a database. """
def setup_databases(self):
pass
def teardown_databases(self, *args):
pass
| from django_nose.runner import NoseTestSuiteRunner
class DatabaselessTestRunner(NoseTestSuiteRunner):
""" A test suite runner that does not setup and tear down a database. """
def setup_databases(self):
pass
def teardown_databases(self, *args):
pass | en | 0.913562 | A test suite runner that does not setup and tear down a database. | 1.670735 | 2 |
django/solution/untitled/mna/apps.py | giserh/book-python | 1 | 6619952 | from django.apps import AppConfig
class MnaConfig(AppConfig):
name = 'mna'
| from django.apps import AppConfig
class MnaConfig(AppConfig):
name = 'mna'
| none | 1 | 1.201025 | 1 | |
test_entity_extractor.py | darenr/simple-name-extractor | 0 | 6619953 | <filename>test_entity_extractor.py
#!/usr/bin/env python
# -*- coding: utf-8 -*--
import unittest
import codecs
from entity_extractor import EntityExtractor
class TestEntityExtractor(unittest.TestCase):
def test_1(self):
with codecs.open('test_documents/text_document1.txt', 'rb', 'utf-8') as f:
result = EntityExtractor().extract_entities(f.read())
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
def test_news(self):
with codecs.open('test_documents/text_document2.txt', 'rb', 'utf-8') as f:
result = EntityExtractor().extract_entities(f.read())
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
def test_url1(self):
result = EntityExtractor().extract_entities_from_url('https://en.wikipedia.org/wiki/Todd_Hido')
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
if __name__ == '__main__':
unittest.main()
| <filename>test_entity_extractor.py
#!/usr/bin/env python
# -*- coding: utf-8 -*--
import unittest
import codecs
from entity_extractor import EntityExtractor
class TestEntityExtractor(unittest.TestCase):
def test_1(self):
with codecs.open('test_documents/text_document1.txt', 'rb', 'utf-8') as f:
result = EntityExtractor().extract_entities(f.read())
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
def test_news(self):
with codecs.open('test_documents/text_document2.txt', 'rb', 'utf-8') as f:
result = EntityExtractor().extract_entities(f.read())
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
def test_url1(self):
result = EntityExtractor().extract_entities_from_url('https://en.wikipedia.org/wiki/Todd_Hido')
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
self.assertTrue(u'<NAME>' in result['PERSON'])
if __name__ == '__main__':
unittest.main()
| en | 0.287576 | #!/usr/bin/env python # -*- coding: utf-8 -*-- | 2.889209 | 3 |
exchange/hitbtc/hitbtc.py | inasie/PyExchange | 12 | 6619954 | <gh_stars>10-100
# -*- coding: utf-8 -*-
import logging
from urllib3.request import urlencode
from exchange.errors import *
from exchange.utils.http_util import HttpUtil
class HitBTC():
"""
HitBTC
https://api.hitbtc.com/
"""
def __init__(self):
self._http = HttpUtil()
def get_currencies(self):
'''
Return the actual list of available currencies, tokens, ICO etc.
https://api.hitbtc.com/#currencies
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/currency'
return self._http.get(URL)
def get_symbols(self):
'''
Return the actual list of currency symbols (currency pairs) traded on HitBTC exchange.
The first listed currency of a symbol is called the base currency,
and the second currency is called the quote currency.
The currency pair indicates how much of the quote currency is needed to purchase one unit of the base currency.
https://api.hitbtc.com/#symbols
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/symbol'
return self._http.get(URL)
def get_tickers(self):
'''
Return ticker information
https://api.hitbtc.com/#tickers
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/ticker'
return self._http.get(URL)
def get_ticker(self, symbol):
'''
Return ticker information
https://api.hitbtc.com/#tickers
:param str symbol:
:return: json object
'''
URL = 'https://api.hitbtc.com/api/2/public/ticker/%s' % symbol
return self._http.get(URL)
def get_trades(self, symbol, sort=None, by=None, _from=None, till=None, limit=None, offset=None):
'''
Return trade data
https://api.hitbtc.com/#trades
:param str symbol:
:param str sort: Default DESC
:param str by: Filtration definition. Accepted values: id, timestamp. Default timestamp
:param int _from:
:param int till:
:param int limit:
:param int offset:
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/trades/%s' % symbol
params = {}
if sort is not None:
params['sort'] = sort
if by is not None:
params['by'] = by
if _from is not None:
params['from'] = _from
if till is not None:
params['till'] = till
if limit is not None:
params['limit'] = limit
if offset is not None:
params['offset'] = offset
return self._http.get(URL, params)
def get_orderbook(self, symbol, limit=None):
'''
An order book is an electronic list of buy and sell orders for a specific symbol, organized by price level.
https://api.hitbtc.com/#orderbook
:param str symbol:
:param itn limit: Limit of orderbook levels, default 100. Set 0 to view full orderbook levels
:return: json object
'''
URL = 'https://api.hitbtc.com/api/2/public/orderbook/%s' % symbol
params = {}
if limit is not None:
params['limit'] = limit
return self._http.get(URL, params)
def get_candles(self, symbol, limit=None, period=None):
'''
An candles used for OHLC a specific symbol.
https://api.hitbtc.com/#candles
:param str symbol:
:param int limit: Limit of candles, default 100.
:param string period: One of: M1 (one minute), M3, M5, M15, M30, H1, H4, D1, D7, 1M (one month). Default is M30 (30 minutes).
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/candles/%s' % symbol
params = {}
if limit is not None:
params['limit'] = limit
if period is not None:
params['period'] = period
return self._http.get(URL, params)
| # -*- coding: utf-8 -*-
import logging
from urllib3.request import urlencode
from exchange.errors import *
from exchange.utils.http_util import HttpUtil
class HitBTC():
"""
HitBTC
https://api.hitbtc.com/
"""
def __init__(self):
self._http = HttpUtil()
def get_currencies(self):
'''
Return the actual list of available currencies, tokens, ICO etc.
https://api.hitbtc.com/#currencies
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/currency'
return self._http.get(URL)
def get_symbols(self):
'''
Return the actual list of currency symbols (currency pairs) traded on HitBTC exchange.
The first listed currency of a symbol is called the base currency,
and the second currency is called the quote currency.
The currency pair indicates how much of the quote currency is needed to purchase one unit of the base currency.
https://api.hitbtc.com/#symbols
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/symbol'
return self._http.get(URL)
def get_tickers(self):
'''
Return ticker information
https://api.hitbtc.com/#tickers
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/ticker'
return self._http.get(URL)
def get_ticker(self, symbol):
'''
Return ticker information
https://api.hitbtc.com/#tickers
:param str symbol:
:return: json object
'''
URL = 'https://api.hitbtc.com/api/2/public/ticker/%s' % symbol
return self._http.get(URL)
def get_trades(self, symbol, sort=None, by=None, _from=None, till=None, limit=None, offset=None):
'''
Return trade data
https://api.hitbtc.com/#trades
:param str symbol:
:param str sort: Default DESC
:param str by: Filtration definition. Accepted values: id, timestamp. Default timestamp
:param int _from:
:param int till:
:param int limit:
:param int offset:
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/trades/%s' % symbol
params = {}
if sort is not None:
params['sort'] = sort
if by is not None:
params['by'] = by
if _from is not None:
params['from'] = _from
if till is not None:
params['till'] = till
if limit is not None:
params['limit'] = limit
if offset is not None:
params['offset'] = offset
return self._http.get(URL, params)
def get_orderbook(self, symbol, limit=None):
'''
An order book is an electronic list of buy and sell orders for a specific symbol, organized by price level.
https://api.hitbtc.com/#orderbook
:param str symbol:
:param itn limit: Limit of orderbook levels, default 100. Set 0 to view full orderbook levels
:return: json object
'''
URL = 'https://api.hitbtc.com/api/2/public/orderbook/%s' % symbol
params = {}
if limit is not None:
params['limit'] = limit
return self._http.get(URL, params)
def get_candles(self, symbol, limit=None, period=None):
'''
An candles used for OHLC a specific symbol.
https://api.hitbtc.com/#candles
:param str symbol:
:param int limit: Limit of candles, default 100.
:param string period: One of: M1 (one minute), M3, M5, M15, M30, H1, H4, D1, D7, 1M (one month). Default is M30 (30 minutes).
:return: json array
'''
URL = 'https://api.hitbtc.com/api/2/public/candles/%s' % symbol
params = {}
if limit is not None:
params['limit'] = limit
if period is not None:
params['period'] = period
return self._http.get(URL, params) | en | 0.633811 | # -*- coding: utf-8 -*- HitBTC https://api.hitbtc.com/ Return the actual list of available currencies, tokens, ICO etc. https://api.hitbtc.com/#currencies :return: json array Return the actual list of currency symbols (currency pairs) traded on HitBTC exchange. The first listed currency of a symbol is called the base currency, and the second currency is called the quote currency. The currency pair indicates how much of the quote currency is needed to purchase one unit of the base currency. https://api.hitbtc.com/#symbols :return: json array Return ticker information https://api.hitbtc.com/#tickers :return: json array Return ticker information https://api.hitbtc.com/#tickers :param str symbol: :return: json object Return trade data https://api.hitbtc.com/#trades :param str symbol: :param str sort: Default DESC :param str by: Filtration definition. Accepted values: id, timestamp. Default timestamp :param int _from: :param int till: :param int limit: :param int offset: :return: json array An order book is an electronic list of buy and sell orders for a specific symbol, organized by price level. https://api.hitbtc.com/#orderbook :param str symbol: :param itn limit: Limit of orderbook levels, default 100. Set 0 to view full orderbook levels :return: json object An candles used for OHLC a specific symbol. https://api.hitbtc.com/#candles :param str symbol: :param int limit: Limit of candles, default 100. :param string period: One of: M1 (one minute), M3, M5, M15, M30, H1, H4, D1, D7, 1M (one month). Default is M30 (30 minutes). :return: json array | 3.19533 | 3 |
predict_real_time.py | angelvillar96/FaceEmoji | 2 | 6619955 | <filename>predict_real_time.py
###########################################################################
# FaceEmoji/test_face_cropping.py
# This file tests the face cropping functionality using open CV
###########################################################################
import os
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv
from PIL import Image
import Lib.utils as utils
def main():
# initializing camera
cam = cv.VideoCapture(0)
cv.namedWindow("Test Face Cropping")
# initalizng face cropper
face_crop = utils.FaceCrop()
# main loop
while True:
# taking image and processing it
ret, frame = cam.read()
faces = face_crop.crop_face_from_image(frame)
# getting faces
face_crop.get_faces(frame, faces)
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
cv.imshow("Test Face Cropping", frame)
if not ret:
break
k = cv.waitKey(1)
# when ESC pressed, finish running
if k%256 == 27:
print("Escape pressed, closing...")
break
# finishing camera
cam.release()
cv.destroyAllWindows()
def main2():
path = os.path.join(os.getcwd(),"Data","sunglasses","1 (13).png")
img = np.array(Image.open(path))
plt.figure()
plt.imshow(img)
#initalizng face cropper
face_crop = utils.FaceCrop(reshape=False)
# getting faces coords
faces = face_crop.crop_face_from_image(img)
# getting faces
face_imgs = face_crop.get_faces(img, faces)
plt.figure()
plt.imshow(face_imgs[0])
plt.show()
if __name__ == "__main__":
main()
#main2()
#
| <filename>predict_real_time.py
###########################################################################
# FaceEmoji/test_face_cropping.py
# This file tests the face cropping functionality using open CV
###########################################################################
import os
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv
from PIL import Image
import Lib.utils as utils
def main():
# initializing camera
cam = cv.VideoCapture(0)
cv.namedWindow("Test Face Cropping")
# initalizng face cropper
face_crop = utils.FaceCrop()
# main loop
while True:
# taking image and processing it
ret, frame = cam.read()
faces = face_crop.crop_face_from_image(frame)
# getting faces
face_crop.get_faces(frame, faces)
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
cv.imshow("Test Face Cropping", frame)
if not ret:
break
k = cv.waitKey(1)
# when ESC pressed, finish running
if k%256 == 27:
print("Escape pressed, closing...")
break
# finishing camera
cam.release()
cv.destroyAllWindows()
def main2():
path = os.path.join(os.getcwd(),"Data","sunglasses","1 (13).png")
img = np.array(Image.open(path))
plt.figure()
plt.imshow(img)
#initalizng face cropper
face_crop = utils.FaceCrop(reshape=False)
# getting faces coords
faces = face_crop.crop_face_from_image(img)
# getting faces
face_imgs = face_crop.get_faces(img, faces)
plt.figure()
plt.imshow(face_imgs[0])
plt.show()
if __name__ == "__main__":
main()
#main2()
#
| en | 0.326666 | ########################################################################### # FaceEmoji/test_face_cropping.py # This file tests the face cropping functionality using open CV ########################################################################### # initializing camera # initalizng face cropper # main loop # taking image and processing it # getting faces # Draw rectangle around the faces # when ESC pressed, finish running # finishing camera #initalizng face cropper # getting faces coords # getting faces #main2() # | 2.920429 | 3 |
coalition/__init__.py | thee-engineer/coalition | 1 | 6619956 | """Init for coalition."""
| """Init for coalition."""
| en | 0.846546 | Init for coalition. | 1.117241 | 1 |
bin/rerun.py | chaokunyang/awesome-bigdata-samples | 7 | 6619957 | <reponame>chaokunyang/awesome-bigdata-samples
# coding=utf-8
# 由于操作系统默认都是python2,因此以下脚本使用python2编写
"""
重新运行指定时间范围内的指定日期内的任务。
示例:
python rerun.py -start 2017/11/21 -end 2017/12/01 -task dayJob.sh
python rerun.py -start 2017/11/21 -end 2017/12/01 -r -task dayJob.sh
"""
from subprocess import call
from datetime import datetime, timedelta
import os
import logging
logger = logging.getLogger("rerun tool")
logger.setLevel(logging.INFO)
logger_handler = logging.FileHandler('rerun.log')
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger_handler.setFormatter(formatter)
logger.addHandler(logger_handler)
def get_opts(argv):
"""Collect command-line options in a dictionary
:rtype: dict
"""
opts = {}
argv = argv[1:] # exclude rerun.py argument
while argv: # 当这里有参数需要解析
if argv[0][0] == '-' and argv[1] and argv[1][0] != '-': # 找到"-name value" pair
opts[argv[0][1:]] = argv[1]
argv = argv[2:] # 前进两步
elif argv[0][0] == '-' and argv[1] and argv[1][0] == '-':
opts[argv[0][1:]] = True
argv = argv[1:] # 前进一步
else:
argv = argv[1:] # 跳过该参数
logger.info('input args : %s', opts)
return opts
def date_range(start_date, end_date, reverse=False):
"""日期范围"""
index_range = range(int((end_date - start_date).days))
if reverse:
index_range = range(int((end_date - start_date).days), 0, -1)
for n in index_range:
yield start_date + timedelta(n)
def run_task(task, day):
"""运行脚本任务"""
work_dir = os.getcwd()
logger.info('start run task: %s for %s', work_dir + "/" + task, day)
call([work_dir + "/" + task, day])
def rerun(opts):
"""离线任务重跑"""
start_day = datetime.strptime(opts['start'], '%Y/%m/%d')
end_day = datetime.strptime(opts['end'], '%Y/%m/%d')
task = opts['task']
reverse = False
if 'r' in opts:
reverse = True
for day in date_range(start_day, end_day, reverse):
run_task(task, day.strftime('%Y/%m/%d'))
if __name__ == "__main__":
from sys import argv
args = get_opts(argv)
rerun(args)
| # coding=utf-8
# 由于操作系统默认都是python2,因此以下脚本使用python2编写
"""
重新运行指定时间范围内的指定日期内的任务。
示例:
python rerun.py -start 2017/11/21 -end 2017/12/01 -task dayJob.sh
python rerun.py -start 2017/11/21 -end 2017/12/01 -r -task dayJob.sh
"""
from subprocess import call
from datetime import datetime, timedelta
import os
import logging
logger = logging.getLogger("rerun tool")
logger.setLevel(logging.INFO)
logger_handler = logging.FileHandler('rerun.log')
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger_handler.setFormatter(formatter)
logger.addHandler(logger_handler)
def get_opts(argv):
"""Collect command-line options in a dictionary
:rtype: dict
"""
opts = {}
argv = argv[1:] # exclude rerun.py argument
while argv: # 当这里有参数需要解析
if argv[0][0] == '-' and argv[1] and argv[1][0] != '-': # 找到"-name value" pair
opts[argv[0][1:]] = argv[1]
argv = argv[2:] # 前进两步
elif argv[0][0] == '-' and argv[1] and argv[1][0] == '-':
opts[argv[0][1:]] = True
argv = argv[1:] # 前进一步
else:
argv = argv[1:] # 跳过该参数
logger.info('input args : %s', opts)
return opts
def date_range(start_date, end_date, reverse=False):
"""日期范围"""
index_range = range(int((end_date - start_date).days))
if reverse:
index_range = range(int((end_date - start_date).days), 0, -1)
for n in index_range:
yield start_date + timedelta(n)
def run_task(task, day):
"""运行脚本任务"""
work_dir = os.getcwd()
logger.info('start run task: %s for %s', work_dir + "/" + task, day)
call([work_dir + "/" + task, day])
def rerun(opts):
"""离线任务重跑"""
start_day = datetime.strptime(opts['start'], '%Y/%m/%d')
end_day = datetime.strptime(opts['end'], '%Y/%m/%d')
task = opts['task']
reverse = False
if 'r' in opts:
reverse = True
for day in date_range(start_day, end_day, reverse):
run_task(task, day.strftime('%Y/%m/%d'))
if __name__ == "__main__":
from sys import argv
args = get_opts(argv)
rerun(args) | zh | 0.782442 | # coding=utf-8 # 由于操作系统默认都是python2,因此以下脚本使用python2编写 重新运行指定时间范围内的指定日期内的任务。 示例: python rerun.py -start 2017/11/21 -end 2017/12/01 -task dayJob.sh python rerun.py -start 2017/11/21 -end 2017/12/01 -r -task dayJob.sh Collect command-line options in a dictionary :rtype: dict # exclude rerun.py argument # 当这里有参数需要解析 # 找到"-name value" pair # 前进两步 # 前进一步 # 跳过该参数 日期范围 运行脚本任务 离线任务重跑 | 2.500122 | 3 |
fabfile.py | ojengwa/zapcore | 0 | 6619958 | # -*- coding: utf-8 -*- ##
import uuid
from contextlib import contextmanager
from fabric.api import env, prompt, local
from fabric.context_managers import lcd as cd, shell_env
def source_settings():
env.SOURCE_PATH = '/zapcore'
env.colorize_errors = True
env.git_path = '<EMAIL>:ojengwa/zapcore.git'
def ifnotsetted(key, default, is_prompt=False, text=None, validate=None):
if not (key in env and env[key]):
if is_prompt:
prompt(text, key, default, validate)
else:
env['key'] = default
def prompts():
source_settings()
ifnotsetted('DATABASE_URL', '', True, "Database URL")
def common_settings():
source_settings()
@contextmanager
def zappa_env():
orig_shell = env['shell']
env_vars_str = ' '.join('{0}={1}'.format(key, value)
for key, value in env.items())
env['shell'] = '{0} {1}'.format(env_vars_str, orig_shell)
yield
env['shell'] = orig_shell
def dev_server():
with cd('./client'):
local('npm run build')
local('python manage.py runserver')
def shell():
local('python manage.py shell')
def db(command='init'):
local('python manage.py db {0}'.format(command))
def test():
local('python manage.py test')
def assets(environment, node_env='production'):
with cd('./client'), shell_env(
NODE_ENV='{0}'.format(node_env),
FLASK_ENV=environment):
local('npm run build')
local('python manage.py collectstatic')
def init():
common_settings()
with zappa_env():
local('zappa init')
def deploy(environment='--all'):
common_settings()
with zappa_env():
local('zappa deploy {0}'.format(environment))
def undeploy(environment, remove_logs=False):
source_settings()
if remove_logs:
cmd = 'zappa undeploy {0} --remove-logs'.format(environment)
else:
cmd = 'zappa undeploy {0}'.format(environment)
with zappa_env():
assets(environment)
local(cmd)
def update(environment):
common_settings()
with zappa_env():
assets(environment)
local('zappa update {0}'.format(environment))
def rollback(environment, revisions=1):
common_settings()
with zappa_env():
local('zappa rollback {0} -n {1}'.format(environment, revisions))
def schedule(environment):
common_settings()
with zappa_env():
local('zappa schedule {0}'.format(environment))
def unschedule(environment):
common_settings()
with zappa_env():
local('zappa unschedule {0}'.format(environment))
def pack(environment, storage_path='{0}.zip'.format(uuid.uuid4())):
common_settings()
with zappa_env():
local('zappa package {0} -o {1}'.format(environment, storage_path))
def cloudformation(lambda_arn, role_arn, environment):
common_settings()
with zappa_env():
local(
'zappa template {0} --l {1} -r {2}'.format(
environment, lambda_arn, role_arn))
def status(environment):
common_settings()
with zappa_env():
local('zappa status {0}'.format(environment))
def log(environment, events='', since='30m'):
common_settings()
with zappa_env():
local(
'zappa tail {0} {1} --since {2}'.format(
environment, events, since))
def invoke(environment, cmd, verbose=True):
common_settings()
if verbose:
text = 'zappa invoke {0} --{1} --raw'.format(environment, cmd)
else:
text = 'zappa invoke {0} --{1}'.format(environment, cmd)
with zappa_env():
local(text)
| # -*- coding: utf-8 -*- ##
import uuid
from contextlib import contextmanager
from fabric.api import env, prompt, local
from fabric.context_managers import lcd as cd, shell_env
def source_settings():
env.SOURCE_PATH = '/zapcore'
env.colorize_errors = True
env.git_path = '<EMAIL>:ojengwa/zapcore.git'
def ifnotsetted(key, default, is_prompt=False, text=None, validate=None):
if not (key in env and env[key]):
if is_prompt:
prompt(text, key, default, validate)
else:
env['key'] = default
def prompts():
source_settings()
ifnotsetted('DATABASE_URL', '', True, "Database URL")
def common_settings():
source_settings()
@contextmanager
def zappa_env():
orig_shell = env['shell']
env_vars_str = ' '.join('{0}={1}'.format(key, value)
for key, value in env.items())
env['shell'] = '{0} {1}'.format(env_vars_str, orig_shell)
yield
env['shell'] = orig_shell
def dev_server():
with cd('./client'):
local('npm run build')
local('python manage.py runserver')
def shell():
local('python manage.py shell')
def db(command='init'):
local('python manage.py db {0}'.format(command))
def test():
local('python manage.py test')
def assets(environment, node_env='production'):
with cd('./client'), shell_env(
NODE_ENV='{0}'.format(node_env),
FLASK_ENV=environment):
local('npm run build')
local('python manage.py collectstatic')
def init():
common_settings()
with zappa_env():
local('zappa init')
def deploy(environment='--all'):
common_settings()
with zappa_env():
local('zappa deploy {0}'.format(environment))
def undeploy(environment, remove_logs=False):
source_settings()
if remove_logs:
cmd = 'zappa undeploy {0} --remove-logs'.format(environment)
else:
cmd = 'zappa undeploy {0}'.format(environment)
with zappa_env():
assets(environment)
local(cmd)
def update(environment):
common_settings()
with zappa_env():
assets(environment)
local('zappa update {0}'.format(environment))
def rollback(environment, revisions=1):
common_settings()
with zappa_env():
local('zappa rollback {0} -n {1}'.format(environment, revisions))
def schedule(environment):
common_settings()
with zappa_env():
local('zappa schedule {0}'.format(environment))
def unschedule(environment):
common_settings()
with zappa_env():
local('zappa unschedule {0}'.format(environment))
def pack(environment, storage_path='{0}.zip'.format(uuid.uuid4())):
common_settings()
with zappa_env():
local('zappa package {0} -o {1}'.format(environment, storage_path))
def cloudformation(lambda_arn, role_arn, environment):
common_settings()
with zappa_env():
local(
'zappa template {0} --l {1} -r {2}'.format(
environment, lambda_arn, role_arn))
def status(environment):
common_settings()
with zappa_env():
local('zappa status {0}'.format(environment))
def log(environment, events='', since='30m'):
common_settings()
with zappa_env():
local(
'zappa tail {0} {1} --since {2}'.format(
environment, events, since))
def invoke(environment, cmd, verbose=True):
common_settings()
if verbose:
text = 'zappa invoke {0} --{1} --raw'.format(environment, cmd)
else:
text = 'zappa invoke {0} --{1}'.format(environment, cmd)
with zappa_env():
local(text)
| en | 0.655584 | # -*- coding: utf-8 -*- ## | 2.007878 | 2 |
python/make_gif.py | symisc/pixlab | 96 | 6619959 | <reponame>symisc/pixlab
import requests
import json
# Generate GIF from a set of static image
# https://pixlab.io/#/cmd?id=makegif
req = requests.post('https://api.pixlab.io/makegif',headers={'Content-Type':'application/json'},data=json.dumps({
'key':'My_Pix_Key',
'frames': [
{
"img":"https://cdn1.iconfinder.com/data/icons/human-6/48/266-512.png"
},
{
"img":"https://cdn1.iconfinder.com/data/icons/human-6/48/267-512.png"
},
{
"img":"https://cdn1.iconfinder.com/data/icons/human-6/48/278-512.png"
},
{
"img":"https://cdn1.iconfinder.com/data/icons/human-6/48/279-512.png"
}
]
}))
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("GIF location: "+ reply['link'])
| import requests
import json
# Generate GIF from a set of static image
# https://pixlab.io/#/cmd?id=makegif
req = requests.post('https://api.pixlab.io/makegif',headers={'Content-Type':'application/json'},data=json.dumps({
'key':'My_Pix_Key',
'frames': [
{
"img":"https://cdn1.iconfinder.com/data/icons/human-6/48/266-512.png"
},
{
"img":"https://cdn1.iconfinder.com/data/icons/human-6/48/267-512.png"
},
{
"img":"https://cdn1.iconfinder.com/data/icons/human-6/48/278-512.png"
},
{
"img":"https://cdn1.iconfinder.com/data/icons/human-6/48/279-512.png"
}
]
}))
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("GIF location: "+ reply['link']) | en | 0.593446 | # Generate GIF from a set of static image # https://pixlab.io/#/cmd?id=makegif | 3.058012 | 3 |
model_center/tokenizer/base_tokenizer.py | zh-zheng/ModelCenter | 4 | 6619960 | <filename>model_center/tokenizer/base_tokenizer.py
# coding=utf-8
import os
from typing import Union
import torch
import bmtrain as bmt
from model_center.utils import check_web_and_convert_path
class BaseTokenizer:
"""
The current implementation is mainly to adapt the training framework of the Transformers toolkit,
and replace the original model implementation.
TODO we will change to our SAM implementation in the future, which will be a more efficient tokenizer
"""
def __init__(self, tokenizer_type):
self.tokenizer_type = tokenizer_type
def from_pretrained(self, pretrained_model_name_or_path: Union[str, os.PathLike], *args, **kwargs):
pretrained_model_name_or_path = check_web_and_convert_path(pretrained_model_name_or_path, 'tokenizer')
return self.tokenizer_type.from_pretrained(pretrained_model_name_or_path, *args, **kwargs)
| <filename>model_center/tokenizer/base_tokenizer.py
# coding=utf-8
import os
from typing import Union
import torch
import bmtrain as bmt
from model_center.utils import check_web_and_convert_path
class BaseTokenizer:
"""
The current implementation is mainly to adapt the training framework of the Transformers toolkit,
and replace the original model implementation.
TODO we will change to our SAM implementation in the future, which will be a more efficient tokenizer
"""
def __init__(self, tokenizer_type):
self.tokenizer_type = tokenizer_type
def from_pretrained(self, pretrained_model_name_or_path: Union[str, os.PathLike], *args, **kwargs):
pretrained_model_name_or_path = check_web_and_convert_path(pretrained_model_name_or_path, 'tokenizer')
return self.tokenizer_type.from_pretrained(pretrained_model_name_or_path, *args, **kwargs)
| en | 0.869807 | # coding=utf-8 The current implementation is mainly to adapt the training framework of the Transformers toolkit, and replace the original model implementation. TODO we will change to our SAM implementation in the future, which will be a more efficient tokenizer | 2.199986 | 2 |
src/nfvlib/nfv_forwarding_decision.py | harpratap/nfv-mpls | 0 | 6619961 | class SDNForwardingDecision():
_id = None
_controller = None
_match = None
_actions = None
def __init__(self, match, actions, controller):
_match = match
_actions = actions
def commit(self):
#FIXME: implement
return None
def setIngressPacketHandler(self, method):
#FIXME: implement
return None
def setEgressPacketHandler(self, method):
#FIXME: implement
return None
def _sendForwardingDecision():
#FIXME: implement
return None
def _removeForwardingDecision():
#FIXME: implement
return None
class SDNForwardingDecision():
    """A single SDN forwarding decision (match -> actions) tied to a
    controller. All controller interactions are still unimplemented stubs.
    """

    _id = None
    _controller = None
    _match = None
    _actions = None

    def __init__(self, match, actions, controller):
        # Bug fix: the original assigned to local variables, leaving the
        # instance attributes at their class-level None defaults, and it
        # silently discarded the controller argument.
        self._match = match
        self._actions = actions
        self._controller = controller

    def commit(self):
        """Push this forwarding decision to the controller."""
        #FIXME: implement
        return None

    def setIngressPacketHandler(self, method):
        """Register *method* as the ingress packet handler."""
        #FIXME: implement
        return None

    def setEgressPacketHandler(self, method):
        """Register *method* as the egress packet handler."""
        #FIXME: implement
        return None

    def _sendForwardingDecision(self):
        # Bug fix: was declared without ``self`` and would raise TypeError
        # when invoked on an instance.
        #FIXME: implement
        return None

    def _removeForwardingDecision(self):
        # Bug fix: was declared without ``self``.
        #FIXME: implement
        return None
main.py | Angkirat/MachineLearning-MovieAnalysis | 1 | 6619962 | import pandas as pd
from pandas.core.reshape.merge import merge
from scipy.sparse import data
import DataCleaning as dc
import time
import modeling as model
from scrape import get_stats
# Configuration for the cleaning pipeline: maps each cleaning strategy to the
# raw dataset columns it applies to.
dataCleaning = {
    # Single-valued categorical columns -> one-hot encoded.
    'SingleValue_onehotEncoding': ['View Rating','Runtime']
    # Multi-valued (delimited) categorical columns -> one-hot encoded.
    ,'MultiValue_onehotEncoding': ['Genre', 'Tags', 'Languages', 'Country Availability']
    # Two-valued columns converted to a 0/1 column.
    ,'booleanColumns': ['Series or Movie']
    # Columns where presence/absence (NaN) is converted to an int flag.
    ,'convertBoolean': ['Awards Received','Awards Nominated For']
    # Numeric columns that get cleaned and standard-scaled.
    ,'numericCleaning': ['viewCount','likeCount','dislikeCount','favoriteCount','commentCount', 'IMDb Votes']
    # Identifier / free-text / metadata columns dropped before modeling.
    ,'columnsToRemove': ['Title','Director','Writer','Actors','Production House','Netflix Link','IMDb Link',
                         'Summary','Image','Poster','trailer_link','Trailer Site','video_id','stats','kind','etag',
                         'items','pageInfo.totalResults','pageInfo.resultsPerPage','Boxoffice']
    # Score columns averaged into the binary modeling target.
    ,'targetColumn': ['Hidden Gem Score','IMDb Score','Rotten Tomatoes Score','Metacritic Score', 'IMDb Votes']
    # Date columns dropped before modeling.
    ,'dateColumn': ['Release Date','Netflix Release Date']
}
def single_onehot_encoding(acutalDF: pd.DataFrame):
    """Append one-hot encodings for every single-valued categorical column.

    Each generated indicator column is prefixed with its source column name;
    the original columns are left in place for the caller to drop later.
    """
    encoded = pd.DataFrame()
    for column in dataCleaning['SingleValue_onehotEncoding']:
        indicators = dc.one_hot_single(acutalDF[column])
        indicators.columns = [f'{column}_{suffix}' for suffix in indicators.columns]
        encoded = pd.concat([encoded, indicators], axis=1)
    return pd.concat([acutalDF, encoded], axis=1)
def multi_onehot_encoding(acutalDF: pd.DataFrame):
    """Append one-hot encodings for the multi-valued categorical columns.

    NOTE(review): this duplicates single_onehot_encoding and still calls
    dc.one_hot_single even though these columns hold multiple values --
    verify whether DataCleaning exposes a multi-value encoder that should
    be used here instead.
    """
    df = pd.DataFrame()
    for col in dataCleaning['MultiValue_onehotEncoding']:
        cleanDF = dc.one_hot_single(acutalDF[col])
        # Prefix indicator columns with the source column name.
        cleanDF.columns = [f'{col}_{st}' for st in cleanDF.columns]
        df = pd.concat([df, cleanDF], axis=1)
    return pd.concat([acutalDF, df], axis=1)
def convert_boolean(actualDF: pd.DataFrame):
    """Convert two-valued columns to 0/1 and NaN-presence columns to ints."""
    for column in dataCleaning['booleanColumns']:
        actualDF[column] = dc.binary_column(actualDF[column])
    for column in dataCleaning['convertBoolean']:
        # 1 when the value is missing, 0 otherwise.
        actualDF[column] = actualDF[column].isna().astype(int)
    return actualDF
def numeric_data_cleaning(acutalDF: pd.DataFrame):
    """Run the generic numeric cleanup, then standard-scale each configured
    numeric column in place."""
    acutalDF = dc.numeric_column_cleaning(acutalDF)
    for column in dataCleaning['numericCleaning']:
        acutalDF[column] = dc.standard_scaler(acutalDF[column])
    return acutalDF
def data_cleaning_operation(inputDF: pd.DataFrame):
    """Run the full encoding pipeline, then drop the now-encoded source
    categorical columns."""
    inputDF = single_onehot_encoding(inputDF)
    inputDF = multi_onehot_encoding(inputDF)
    inputDF = convert_boolean(inputDF)
    inputDF = numeric_data_cleaning(inputDF)
    for key in ('SingleValue_onehotEncoding', 'MultiValue_onehotEncoding'):
        inputDF.drop(dataCleaning[key], axis=1, inplace=True)
    return inputDF
def clean_data(DF: pd.DataFrame, yt_stats: pd.DataFrame):
    """Join the Netflix dataset with YouTube trailer stats and return a
    fully-encoded modeling table with a binary ``target_column``.

    target_column is True for titles whose averaged score columns fall
    below the overall mean.
    """
    DF = DF.rename(columns={'TMDb Trailer': 'trailer_link'}).drop_duplicates(subset=['trailer_link'])
    yt_stats = yt_stats.drop_duplicates(subset=['trailer_link'])
    complete_DF = pd.merge(DF, yt_stats, how="inner", on="trailer_link")
    complete_DF.drop(dataCleaning['columnsToRemove'], axis=1, inplace=True)
    # Rows without YouTube view counts cannot be modeled.
    complete_DF = complete_DF.drop(complete_DF[complete_DF['viewCount'].isna()].index)
    cleaned_DF = data_cleaning_operation(complete_DF)
    # Average the score columns into one number; target is True for
    # below-average titles.
    cleaned_DF['FinalScore'] = cleaned_DF[dataCleaning['targetColumn']].mean(axis=1)
    cleaned_DF['target_column'] = cleaned_DF['FinalScore'] < cleaned_DF['FinalScore'].mean()
    cleaned_DF.drop('FinalScore', axis=1, inplace=True)
    cleaned_DF.drop(dataCleaning['targetColumn'], axis=1, inplace=True)
    cleaned_DF.drop(dataCleaning['dateColumn'], axis=1, inplace=True)
    print(cleaned_DF.shape)
    cleaned_DF.dropna(how="any", inplace=True)
    print(cleaned_DF.shape)
    NumericColumns = dataCleaning['numericCleaning'] + dataCleaning['targetColumn']
    # Bug fix: the original combined the two conditions with ``or``, which is
    # tautological (any column differing from 'target_column' passed) and let
    # numeric and target columns into the constant-column sweep below.
    cleaningColumns = [col for col in cleaned_DF.columns
                       if col not in NumericColumns and col != 'target_column']
    print(f'columns to clean are {len(cleaningColumns)}')
    row_count = cleaned_DF.shape[0]
    # Drop indicator columns that are constant (all 0 or all 1) -- they carry
    # no information for the model.
    for col in cleaningColumns:
        col_sum = cleaned_DF[col].sum()
        if col_sum == 0 or col_sum == row_count:
            cleaned_DF.drop(col, axis=1, inplace=True)
    print(cleaned_DF.shape)
    cleaned_DF.columns = [col.replace(' ', '_') for col in cleaned_DF.columns]
    return cleaned_DF
if __name__ == '__main__':
    # Load the FlixGem dataset, scrape YouTube stats for each trailer, clean,
    # and hand the result to the modeling pipeline.
    DF = pd.read_excel('FlixGem.com Dataset - Latest Netflix data with thousands of attributes.xlsx',sheet_name='FlixGem.com dataset')
    yt_status = get_stats(DF['TMDb Trailer'])
    # NOTE(review): this rebinding shadows the clean_data() function with its
    # result; harmless here, but consider renaming the variable.
    clean_data = clean_data(DF, yt_status)
    model.main(clean_data, 'target_column')
    pass
| import pandas as pd
from pandas.core.reshape.merge import merge
from scipy.sparse import data
import DataCleaning as dc
import time
import modeling as model
from scrape import get_stats
# Configuration for the cleaning pipeline: maps each cleaning strategy to the
# raw dataset columns it applies to.
dataCleaning = {
    # Single-valued categorical columns -> one-hot encoded.
    'SingleValue_onehotEncoding': ['View Rating','Runtime']
    # Multi-valued (delimited) categorical columns -> one-hot encoded.
    ,'MultiValue_onehotEncoding': ['Genre', 'Tags', 'Languages', 'Country Availability']
    # Two-valued columns converted to a 0/1 column.
    ,'booleanColumns': ['Series or Movie']
    # Columns where presence/absence (NaN) is converted to an int flag.
    ,'convertBoolean': ['Awards Received','Awards Nominated For']
    # Numeric columns that get cleaned and standard-scaled.
    ,'numericCleaning': ['viewCount','likeCount','dislikeCount','favoriteCount','commentCount', 'IMDb Votes']
    # Identifier / free-text / metadata columns dropped before modeling.
    ,'columnsToRemove': ['Title','Director','Writer','Actors','Production House','Netflix Link','IMDb Link',
                         'Summary','Image','Poster','trailer_link','Trailer Site','video_id','stats','kind','etag',
                         'items','pageInfo.totalResults','pageInfo.resultsPerPage','Boxoffice']
    # Score columns averaged into the binary modeling target.
    ,'targetColumn': ['Hidden Gem Score','IMDb Score','Rotten Tomatoes Score','Metacritic Score', 'IMDb Votes']
    # Date columns dropped before modeling.
    ,'dateColumn': ['Release Date','Netflix Release Date']
}
def single_onehot_encoding(acutalDF: pd.DataFrame):
    """Append one-hot encodings for every single-valued categorical column.

    Each generated indicator column is prefixed with its source column name;
    the original columns are left in place for the caller to drop later.
    """
    encoded = pd.DataFrame()
    for column in dataCleaning['SingleValue_onehotEncoding']:
        indicators = dc.one_hot_single(acutalDF[column])
        indicators.columns = [f'{column}_{suffix}' for suffix in indicators.columns]
        encoded = pd.concat([encoded, indicators], axis=1)
    return pd.concat([acutalDF, encoded], axis=1)
def multi_onehot_encoding(acutalDF: pd.DataFrame):
    """Append one-hot encodings for the multi-valued categorical columns.

    NOTE(review): this duplicates single_onehot_encoding and still calls
    dc.one_hot_single even though these columns hold multiple values --
    verify whether DataCleaning exposes a multi-value encoder that should
    be used here instead.
    """
    df = pd.DataFrame()
    for col in dataCleaning['MultiValue_onehotEncoding']:
        cleanDF = dc.one_hot_single(acutalDF[col])
        # Prefix indicator columns with the source column name.
        cleanDF.columns = [f'{col}_{st}' for st in cleanDF.columns]
        df = pd.concat([df, cleanDF], axis=1)
    return pd.concat([acutalDF, df], axis=1)
def convert_boolean(actualDF: pd.DataFrame):
    """Convert two-valued columns to 0/1 and NaN-presence columns to ints."""
    for column in dataCleaning['booleanColumns']:
        actualDF[column] = dc.binary_column(actualDF[column])
    for column in dataCleaning['convertBoolean']:
        # 1 when the value is missing, 0 otherwise.
        actualDF[column] = actualDF[column].isna().astype(int)
    return actualDF
def numeric_data_cleaning(acutalDF: pd.DataFrame):
    """Run the generic numeric cleanup, then standard-scale each configured
    numeric column in place."""
    acutalDF = dc.numeric_column_cleaning(acutalDF)
    for column in dataCleaning['numericCleaning']:
        acutalDF[column] = dc.standard_scaler(acutalDF[column])
    return acutalDF
def data_cleaning_operation(inputDF: pd.DataFrame):
    """Run the full encoding pipeline, then drop the now-encoded source
    categorical columns."""
    inputDF = single_onehot_encoding(inputDF)
    inputDF = multi_onehot_encoding(inputDF)
    inputDF = convert_boolean(inputDF)
    inputDF = numeric_data_cleaning(inputDF)
    for key in ('SingleValue_onehotEncoding', 'MultiValue_onehotEncoding'):
        inputDF.drop(dataCleaning[key], axis=1, inplace=True)
    return inputDF
def clean_data(DF: pd.DataFrame, yt_stats: pd.DataFrame):
    """Join the Netflix dataset with YouTube trailer stats and return a
    fully-encoded modeling table with a binary ``target_column``.

    target_column is True for titles whose averaged score columns fall
    below the overall mean.
    """
    DF = DF.rename(columns={'TMDb Trailer': 'trailer_link'}).drop_duplicates(subset=['trailer_link'])
    yt_stats = yt_stats.drop_duplicates(subset=['trailer_link'])
    complete_DF = pd.merge(DF, yt_stats, how="inner", on="trailer_link")
    complete_DF.drop(dataCleaning['columnsToRemove'], axis=1, inplace=True)
    # Rows without YouTube view counts cannot be modeled.
    complete_DF = complete_DF.drop(complete_DF[complete_DF['viewCount'].isna()].index)
    cleaned_DF = data_cleaning_operation(complete_DF)
    # Average the score columns into one number; target is True for
    # below-average titles.
    cleaned_DF['FinalScore'] = cleaned_DF[dataCleaning['targetColumn']].mean(axis=1)
    cleaned_DF['target_column'] = cleaned_DF['FinalScore'] < cleaned_DF['FinalScore'].mean()
    cleaned_DF.drop('FinalScore', axis=1, inplace=True)
    cleaned_DF.drop(dataCleaning['targetColumn'], axis=1, inplace=True)
    cleaned_DF.drop(dataCleaning['dateColumn'], axis=1, inplace=True)
    print(cleaned_DF.shape)
    cleaned_DF.dropna(how="any", inplace=True)
    print(cleaned_DF.shape)
    NumericColumns = dataCleaning['numericCleaning'] + dataCleaning['targetColumn']
    # Bug fix: the original combined the two conditions with ``or``, which is
    # tautological (any column differing from 'target_column' passed) and let
    # numeric and target columns into the constant-column sweep below.
    cleaningColumns = [col for col in cleaned_DF.columns
                       if col not in NumericColumns and col != 'target_column']
    print(f'columns to clean are {len(cleaningColumns)}')
    row_count = cleaned_DF.shape[0]
    # Drop indicator columns that are constant (all 0 or all 1) -- they carry
    # no information for the model.
    for col in cleaningColumns:
        col_sum = cleaned_DF[col].sum()
        if col_sum == 0 or col_sum == row_count:
            cleaned_DF.drop(col, axis=1, inplace=True)
    print(cleaned_DF.shape)
    cleaned_DF.columns = [col.replace(' ', '_') for col in cleaned_DF.columns]
    return cleaned_DF
if __name__ == '__main__':
    # Load the FlixGem dataset, scrape YouTube stats for each trailer, clean,
    # and hand the result to the modeling pipeline.
    DF = pd.read_excel('FlixGem.com Dataset - Latest Netflix data with thousands of attributes.xlsx',sheet_name='FlixGem.com dataset')
    yt_status = get_stats(DF['TMDb Trailer'])
    # NOTE(review): this rebinding shadows the clean_data() function with its
    # result; harmless here, but consider renaming the variable.
    clean_data = clean_data(DF, yt_status)
    model.main(clean_data, 'target_column')
    pass
| none | 1 | 2.373925 | 2 | |
boot.py | s-light/reflow_controller | 0 | 6619963 | #!/usr/bin/env python3
# coding=utf-8
"""
setup
"""
import usb_cdc
# https://learn.adafruit.com/customizing-usb-devices-in-circuitpython/circuitpy-midi-serial
print("usb_cdc: enable console & data")
usb_cdc.enable(console=True, data=True)
#!/usr/bin/env python3
# coding=utf-8
"""
Boot-time USB setup for CircuitPython.

Enables both the USB CDC console (REPL) and the secondary data serial
endpoint so the host can talk to the board over two channels.
"""
import usb_cdc

# https://learn.adafruit.com/customizing-usb-devices-in-circuitpython/circuitpy-midi-serial
print("usb_cdc: enable console & data")
usb_cdc.enable(console=True, data=True)
| en | 0.444277 | #!/usr/bin/env python3 # coding=utf-8 setup # https://learn.adafruit.com/customizing-usb-devices-in-circuitpython/circuitpy-midi-serial | 2.119631 | 2 |
libapparmor/utils/pyronia/easyprof.py | pyronia-sys/libpyronia | 0 | 6619964 | <gh_stars>0
# ------------------------------------------------------------------
#
# Copyright (C) 2011-2015 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License published by the Free Software Foundation.
#
# ------------------------------------------------------------------
from __future__ import with_statement
import codecs
import copy
import glob
import json
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
#
# TODO: move this out to the common library
#
#from apparmor import AppArmorException
class AppArmorException(Exception):
    """Exception type raised for all AppArmor/easyprof errors.

    The offending value is kept on ``self.value``; ``str()`` shows its repr.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
#
# End common
#
DEBUGGING = False
#
# TODO: move this out to a utilities library
#
def error(out, exit_code=1, do_exit=True):
    """Write an ERROR-prefixed message to stderr, then (by default) exit."""
    try:
        sys.stderr.write("ERROR: %s\n" % (out))
    except IOError:
        # stderr may be closed; the exit below must still happen
        pass
    if not do_exit:
        return
    sys.exit(exit_code)
def warn(out):
    """Emit a WARN-prefixed message on stderr, swallowing write errors."""
    try:
        sys.stderr.write("WARN: %s\n" % (out))
    except IOError:
        pass
def msg(out, output=sys.stdout):
    """Print *out* followed by a newline to *output* (default: stdout).

    Bug fix: the original accepted an ``output`` stream argument but always
    wrote to ``sys.stdout``, making the parameter dead. Write errors are
    ignored, matching error()/warn().
    """
    try:
        output.write("%s\n" % (out))
    except IOError:
        pass
def cmd(command):
    """Run *command*, returning [exitcode, combined stdout+stderr].

    Returns exit code 127 with the OS error text when the executable
    cannot be launched at all.
    """
    debug(command)
    try:
        child = subprocess.Popen(command,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
    except OSError as ex:
        return [127, str(ex)]
    output = child.communicate()[0]
    return [child.returncode, output]
def cmd_pipe(command1, command2):
    """Run ``command1 | command2``; return [exitcode, stdout of command2].

    Returns exit code 127 with the OS error text when either process
    cannot be launched.
    """
    try:
        producer = subprocess.Popen(command1, stdout=subprocess.PIPE)
        consumer = subprocess.Popen(command2, stdin=producer.stdout)
    except OSError as ex:
        return [127, str(ex)]
    output = consumer.communicate()[0]
    return [consumer.returncode, output]
def debug(out):
    """Print a DEBUG-prefixed message to stderr when debugging is enabled."""
    if not DEBUGGING:
        return
    try:
        sys.stderr.write("DEBUG: %s\n" % (out))
    except IOError:
        pass
def valid_binary_path(path):
    """Check that *path* is a normalized absolute path and not a symlink.

    Paths that do not exist yet are accepted as long as they are absolute
    and already normalized.
    """
    try:
        abs_path = os.path.abspath(path)
    except Exception:
        debug("Could not find absolute path for binary")
        return False

    if path != abs_path:
        debug("Binary should use a normalized absolute path")
        return False

    if not os.path.exists(abs_path):
        # The binary does not have to exist yet.
        return True

    if os.path.realpath(path) != abs_path:
        debug("Binary should not be a symlink")
        return False

    return True
def valid_variable(v):
    """Validate an AppArmor variable declaration of the form ``@{NAME}=value``."""
    debug("Checking '%s'" % v)
    try:
        (key, value) = v.split('=')
    except Exception:
        return False

    if not re.search(r'^@\{[a-zA-Z0-9_]+\}$', key):
        return False

    if '/' in value:
        # Values containing a path separator must themselves be valid paths;
        # relative values are fine as long as they do not start with '/'.
        if not valid_path(value, relative_ok=not value.startswith('/')):
            return False

    if '"' in value:
        return False

    # If we made it here, we are safe
    return True
def valid_path(path, relative_ok=False):
    """Validate a path for use in policy: normalized, quote-free, no '../'."""
    m = "Invalid path: %s" % (path)
    if not relative_ok and not path.startswith('/'):
        debug("%s (relative)" % (m))
        return False

    if '"' in path:  # We double quote elsewhere
        debug("%s (quote)" % (m))
        return False

    if '../' in path:
        debug("%s (../ path escape)" % (m))
        return False

    try:
        normalized = os.path.normpath(path)
    except Exception:
        debug("%s (could not normalize)" % (m))
        return False

    if normalized != path:
        debug("%s (normalized path != path (%s != %s))" % (m, normalized, path))
        return False

    # If we made it here, we are safe
    return True
def _is_safe(s):
'''Known safe regex'''
if re.search(r'^[a-zA-Z_0-9\-\.]+$', s):
return True
return False
def valid_policy_vendor(s):
    """Policy vendor names use the safe-identifier character set."""
    return _is_safe(s)
def valid_policy_version(v):
    """A policy version is valid when it parses as a non-negative float."""
    try:
        version = float(v)
    except ValueError:
        return False

    if version < 0:
        return False
    return True
def valid_template_name(s, strict=False):
    """Template names are safe identifiers; unless *strict*, an absolute
    path passing valid_path() is also accepted."""
    if not strict and s.startswith('/'):
        return valid_path(s)
    return _is_safe(s)
def valid_abstraction_name(s):
    """Abstraction names use the safe-identifier character set."""
    return _is_safe(s)
def valid_profile_name(s):
    """Profile names are either valid absolute paths or Debian-style names."""
    # profile name specifies path
    if s.startswith('/'):
        return valid_path(s)

    # profile name does not specify path:
    # alpha-numeric and Debian version characters, plus '_'
    return re.search(r'^[a-zA-Z0-9][a-zA-Z0-9_\+\-\.:~]+$', s) is not None
def valid_policy_group_name(s):
    """Policy group names use the safe-identifier character set."""
    return _is_safe(s)
def get_directory_contents(path):
    """Return the sorted entries of *path*, or None when the path is invalid."""
    if not valid_path(path):
        return None

    return sorted(glob.glob(path + "/*"))
def open_file_read(path):
    """Open *path* for reading as UTF-8 and return the file object.

    Errors (e.g. a missing file) propagate to the caller. The original
    wrapped ``codecs.open`` in a ``try/except`` whose only handler
    re-raised, which is a no-op and has been removed.
    """
    return codecs.open(path, 'r', "UTF-8")
def verify_policy(policy):
    """Return True when *policy* compiles with apparmor_parser.

    *policy* may be the policy text, or (when it is a single line that is a
    valid absolute path) a path to an existing policy file. When
    apparmor_parser cannot be found the check is skipped and True returned.
    """
    exe = "/sbin/apparmor_parser"
    if not os.path.exists(exe):
        rc, exe = cmd(['which', 'apparmor_parser'])
        if rc != 0:
            warn("Could not find apparmor_parser. Skipping verify")
            return True
        # Bug fix: cmd() returns the raw stdout of 'which' (bytes with a
        # trailing newline); the original passed that straight to Popen as
        # the executable name, which cannot work.
        if isinstance(exe, bytes):
            exe = exe.decode('utf-8')
        exe = exe.strip()

    fn = ""
    # if policy starts with '/' and is one line, assume it is a path
    is_path = len(policy.splitlines()) == 1 and valid_path(policy)
    if is_path:
        fn = policy
    else:
        f, fn = tempfile.mkstemp(prefix='aa-easyprof')
        if not isinstance(policy, bytes):
            policy = policy.encode('utf-8')
        os.write(f, policy)
        os.close(f)

    rc, out = cmd([exe, '-QTK', fn])
    # Bug fix: only remove the temporary file we created ourselves; the
    # original unconditionally unlinked fn, deleting a caller-supplied
    # policy file when a path was passed in.
    if not is_path:
        os.unlink(fn)
    return rc == 0
#
# End utility functions
#
class AppArmorEasyProfile:
    '''Easy profile class.

    Resolves the templates and policy-groups directories (from the config
    file, command-line options and optional vendor/version subdirectories),
    validates the target binary, and generates policy text
    (gen_policy/output_policy) or a JSON manifest (gen_manifest).
    '''
    def __init__(self, binary, opt):
        # 'binary' may be None when only --profile-name is used; 'opt' is the
        # parsed optparse options object. NOTE(review): verify_options() is
        # defined elsewhere in this module (outside this chunk).
        verify_options(opt)
        opt.ensure_value("conffile", "/etc/pyronia/easyprof.conf")
        self.conffile = os.path.abspath(opt.conffile)
        debug("Examining confile=%s" % (self.conffile))

        # Resolve the templates/policygroups directories: config file first,
        # then command-line overrides.
        self.dirs = dict()
        if os.path.isfile(self.conffile):
            self._get_defaults()

        if opt.templates_dir and os.path.isdir(opt.templates_dir):
            self.dirs['templates'] = os.path.abspath(opt.templates_dir)
        elif not opt.templates_dir and \
             opt.template and \
             os.path.isfile(opt.template) and \
             valid_path(opt.template):
            # If we specified the template and it is an absolute path, just set
            # the templates directory to the parent of the template so we don't
            # have to require --template-dir with absolute paths.
            self.dirs['templates'] = os.path.abspath(os.path.dirname(opt.template))

        if opt.include_templates_dir and \
           os.path.isdir(opt.include_templates_dir):
            self.dirs['templates_include'] = os.path.abspath(opt.include_templates_dir)

        if opt.policy_groups_dir and os.path.isdir(opt.policy_groups_dir):
            self.dirs['policygroups'] = os.path.abspath(opt.policy_groups_dir)

        if opt.include_policy_groups_dir and \
           os.path.isdir(opt.include_policy_groups_dir):
            self.dirs['policygroups_include'] = os.path.abspath(opt.include_policy_groups_dir)

        self.policy_version = None
        self.policy_vendor = None
        if (opt.policy_version and not opt.policy_vendor) or \
           (opt.policy_vendor and not opt.policy_version):
            raise AppArmorException("Must specify both policy version and vendor")

        # If specified --policy-version and --policy-vendor, use
        # templates_dir/policy_vendor/policy_version
        if opt.policy_version and opt.policy_vendor:
            self.policy_vendor = opt.policy_vendor
            self.policy_version = str(opt.policy_version)

            for i in ['templates', 'policygroups']:
                d = os.path.join(self.dirs[i], \
                                 self.policy_vendor, \
                                 self.policy_version)
                if not os.path.isdir(d):
                    raise AppArmorException(
                            "Could not find %s directory '%s'" % (i, d))
                self.dirs[i] = d

        if not 'templates' in self.dirs:
            raise AppArmorException("Could not find templates directory")
        if not 'policygroups' in self.dirs:
            raise AppArmorException("Could not find policygroups directory")

        self.aa_topdir = "/etc/pyronia.d"

        self.binary = binary
        if binary:
            if not valid_binary_path(binary):
                raise AppArmorException("Invalid path for binary: '%s'" % binary)

        if opt.manifest:
            # Manifests must not reference templates by absolute path.
            self.set_template(opt.template, allow_abs_path=False)
        else:
            self.set_template(opt.template)
        self.set_policygroup(opt.policy_groups)
        if opt.name:
            self.set_name(opt.name)
        elif self.binary != None:
            self.set_name(self.binary)

        # Enumerate all available templates and policy groups (system dirs
        # plus the optional include dirs, without duplicates).
        self.templates = []
        for f in get_directory_contents(self.dirs['templates']):
            if os.path.isfile(f):
                self.templates.append(f)
        if 'templates_include' in self.dirs:
            for f in get_directory_contents(self.dirs['templates_include']):
                if os.path.isfile(f) and f not in self.templates:
                    self.templates.append(f)

        self.policy_groups = []
        for f in get_directory_contents(self.dirs['policygroups']):
            if os.path.isfile(f):
                self.policy_groups.append(f)
        if 'policygroups_include' in self.dirs:
            for f in get_directory_contents(self.dirs['policygroups_include']):
                if os.path.isfile(f) and f not in self.policy_groups:
                    self.policy_groups.append(f)

    def _get_defaults(self):
        '''Read in defaults from configuration'''
        if not os.path.exists(self.conffile):
            raise AppArmorException("Could not find '%s'" % self.conffile)

        # Read in the configuration
        f = open_file_read(self.conffile)
        # Only KEY="value" style lines are considered.
        pat = re.compile(r'^\w+=".*"?')
        for line in f:
            if not pat.search(line):
                continue
            if line.startswith("POLICYGROUPS_DIR="):
                d = re.split(r'=', line.strip())[1].strip('["\']')
                self.dirs['policygroups'] = d
            elif line.startswith("TEMPLATES_DIR="):
                d = re.split(r'=', line.strip())[1].strip('["\']')
                self.dirs['templates'] = d
        f.close()

        keys = self.dirs.keys()
        if 'templates' not in keys:
            raise AppArmorException("Could not find TEMPLATES_DIR in '%s'" % self.conffile)
        if 'policygroups' not in keys:
            raise AppArmorException("Could not find POLICYGROUPS_DIR in '%s'" % self.conffile)

        for k in self.dirs.keys():
            if not os.path.isdir(self.dirs[k]):
                raise AppArmorException("Could not find '%s'" % self.dirs[k])

    def set_name(self, name):
        '''Set name of policy'''
        self.name = name

    def get_template(self):
        '''Get contents of current template'''
        return open(self.template).read()

    def set_template(self, template, allow_abs_path=True):
        '''Set current template'''
        if "../" in template:
            raise AppArmorException('template "%s" contains "../" escape path' % (template))
        elif template.startswith('/') and not allow_abs_path:
            raise AppArmorException("Cannot use an absolute path template '%s'" % template)

        # If have an abs path, just use it
        if template.startswith('/'):
            if not os.path.exists(template):
                raise AppArmorException('%s does not exist' % (template))
            self.template = template
            return

        # Find the template since we don't have an abs path
        sys_t = os.path.join(self.dirs['templates'], template)
        inc_t = None
        if 'templates_include' in self.dirs:
            inc_t = os.path.join(self.dirs['templates_include'], template)

        # System templates directory takes precedence over the include dir.
        if os.path.exists(sys_t):
            self.template = sys_t
        elif inc_t is not None and os.path.exists(inc_t):
            self.template = inc_t
        else:
            raise AppArmorException('%s does not exist' % (template))

    def get_templates(self):
        '''Get list of all available templates by filename'''
        return self.templates

    def get_policygroup(self, policygroup):
        '''Get contents of specific policygroup'''
        p = policygroup
        if not p.startswith('/'):
            # Resolve a bare name against the system dir, then the include dir.
            sys_p = os.path.join(self.dirs['policygroups'], p)
            inc_p = None
            if 'policygroups_include' in self.dirs:
                inc_p = os.path.join(self.dirs['policygroups_include'], p)

            if os.path.exists(sys_p):
                p = sys_p
            elif inc_p is not None and os.path.exists(inc_p):
                p = inc_p

        if self.policy_groups == None or not p in self.policy_groups:
            raise AppArmorException("Policy group '%s' does not exist" % p)
        return open(p).read()

    def set_policygroup(self, policygroups):
        '''Set policygroups (comma-separated names or absolute paths).'''
        self.policy_groups = []
        if policygroups != None:
            for p in policygroups.split(','):
                # If have abs path, just use it
                if p.startswith('/'):
                    if not os.path.exists(p):
                        raise AppArmorException('%s does not exist' % (p))
                    self.policy_groups.append(p)
                    continue

                # Find the policy group since we don't have and abs path
                sys_p = os.path.join(self.dirs['policygroups'], p)
                inc_p = None
                if 'policygroups_include' in self.dirs:
                    inc_p = os.path.join(self.dirs['policygroups_include'], p)

                if os.path.exists(sys_p):
                    self.policy_groups.append(sys_p)
                elif inc_p is not None and os.path.exists(inc_p):
                    self.policy_groups.append(inc_p)
                else:
                    raise AppArmorException('%s does not exist' % (p))

    def get_policy_groups(self):
        '''Get list of all policy groups by filename'''
        return self.policy_groups

    def gen_abstraction_rule(self, abstraction):
        '''Generate an abstraction rule (#include line), verifying the
        abstraction exists under the Pyronia abstractions directory.'''
        p = os.path.join(self.aa_topdir, "abstractions", abstraction)
        if not os.path.exists(p):
            raise AppArmorException("%s does not exist" % p)
        return "#include <abstractions/%s>" % abstraction

    def gen_variable_declaration(self, dec):
        '''Generate a variable declaration'''
        if not valid_variable(dec):
            raise AppArmorException("Invalid variable declaration '%s'" % dec)
        # Make sure we always quote
        k, v = dec.split('=')
        return '%s="%s"' % (k, v)

    def gen_path_rule(self, path, access):
        '''Generate AppArmor path rule(s) for *path* with the given access
        string; home paths additionally get the "owner" modifier.'''
        rule = []
        if not path.startswith('/') and not path.startswith('@'):
            raise AppArmorException("'%s' should not be relative path" % path)

        owner = ""
        if path.startswith('/home/') or path.startswith("@{HOME"):
            owner = "owner "

        if path.endswith('/'):
            # Directory: rule for the dir itself plus everything beneath it.
            rule.append("%s %s," % (path, access))
            rule.append("%s%s** %s," % (owner, path, access))
        elif path.endswith('/**') or path.endswith('/*'):
            # Glob: also allow access to the parent directory itself.
            rule.append("%s %s," % (os.path.dirname(path), access))
            rule.append("%s%s %s," % (owner, path, access))
        else:
            rule.append("%s%s %s," % (owner, path, access))

        return rule

    def gen_policy(self, name,
                   binary=None,
                   profile_name=None,
                   template_var=[],
                   abstractions=None,
                   policy_groups=None,
                   read_path=[],
                   write_path=[],
                   author=None,
                   comment=None,
                   copyright=None,
                   no_verify=False):
        '''Render the current template into complete policy text, filling in
        the ###...### placeholders, and (unless no_verify) compile-check it.

        NOTE(review): the mutable default arguments (template_var=[], ...)
        are shared across calls and are sorted in place below.
        '''
        def find_prefix(t, s):
            '''Calculate whitespace prefix based on occurrence of s in t'''
            pat = re.compile(r'^ *%s' % s)
            p = ""
            for line in t.splitlines():
                if pat.match(line):
                    p = " " * (len(line) - len(line.lstrip()))
                    break
            return p

        policy = self.get_template()
        # Strip the template's usage header (everything up to and including
        # the ###ENDUSAGE### marker line).
        if '###ENDUSAGE###' in policy:
            found = False
            tmp = ""
            for line in policy.splitlines():
                if not found:
                    if line.startswith('###ENDUSAGE###'):
                        found = True
                    continue
                tmp += line + "\n"
            policy = tmp

        attachment = ""
        if binary:
            if not valid_binary_path(binary):
                raise AppArmorException("Invalid path for binary: '%s'" % \
                                        binary)
            if profile_name:
                attachment = 'profile "%s" "%s"' % (profile_name, binary)
            else:
                attachment = '"%s"' % binary
        elif profile_name:
            attachment = 'profile "%s"' % profile_name
        else:
            raise AppArmorException("Must specify binary and/or profile name")
        policy = re.sub(r'###PROFILEATTACH###', attachment, policy)

        policy = re.sub(r'###NAME###', name, policy)

        # Fill-in various comment fields
        if comment != None:
            policy = re.sub(r'###COMMENT###', "Comment: %s" % comment, policy)

        if author != None:
            policy = re.sub(r'###AUTHOR###', "Author: %s" % author, policy)

        if copyright != None:
            policy = re.sub(r'###COPYRIGHT###', "Copyright: %s" % copyright, policy)

        # Fill-in rules and variables with proper indenting
        search = '###ABSTRACTIONS###'
        prefix = find_prefix(policy, search)
        s = "%s# No abstractions specified" % prefix
        if abstractions != None:
            s = "%s# Specified abstractions" % (prefix)
            t = abstractions.split(',')
            t.sort()
            for i in t:
                s += "\n%s%s" % (prefix, self.gen_abstraction_rule(i))
        policy = re.sub(r' *%s' % search, s, policy)

        search = '###POLICYGROUPS###'
        prefix = find_prefix(policy, search)
        s = "%s# No policy groups specified" % prefix
        if policy_groups != None:
            s = "%s# Rules specified via policy groups" % (prefix)
            t = policy_groups.split(',')
            t.sort()
            for i in t:
                for line in self.get_policygroup(i).splitlines():
                    s += "\n%s%s" % (prefix, line)
                # Blank line between groups, but not after the last one.
                if i != policy_groups.split(',')[-1]:
                    s += "\n"
        policy = re.sub(r' *%s' % search, s, policy)

        search = '###VAR###'
        prefix = find_prefix(policy, search)
        s = "%s# No template variables specified" % prefix
        if len(template_var) > 0:
            s = "%s# Specified profile variables" % (prefix)
            template_var.sort()
            for i in template_var:
                s += "\n%s%s" % (prefix, self.gen_variable_declaration(i))
        policy = re.sub(r' *%s' % search, s, policy)

        search = '###READS###'
        prefix = find_prefix(policy, search)
        s = "%s# No read paths specified" % prefix
        if len(read_path) > 0:
            s = "%s# Specified read permissions" % (prefix)
            read_path.sort()
            for i in read_path:
                for r in self.gen_path_rule(i, 'rk'):
                    s += "\n%s%s" % (prefix, r)
        policy = re.sub(r' *%s' % search, s, policy)

        search = '###WRITES###'
        prefix = find_prefix(policy, search)
        s = "%s# No write paths specified" % prefix
        if len(write_path) > 0:
            s = "%s# Specified write permissions" % (prefix)
            write_path.sort()
            for i in write_path:
                for r in self.gen_path_rule(i, 'rwk'):
                    s += "\n%s%s" % (prefix, r)
        policy = re.sub(r' *%s' % search, s, policy)

        if no_verify:
            debug("Skipping policy verification")
        elif not verify_policy(policy):
            msg("\n" + policy)
            raise AppArmorException("Invalid policy")

        return policy

    def output_policy(self, params, count=0, dir=None):
        '''Output policy: to stdout when no directory is given, otherwise to
        a new file in *dir* named after the profile/binary.'''
        policy = self.gen_policy(**params)
        if not dir:
            if count:
                sys.stdout.write('### aa-easyprof profile #%d ###\n' % count)
            sys.stdout.write('%s\n' % policy)
        else:
            out_fn = ""
            if 'profile_name' in params:
                out_fn = params['profile_name']
            elif 'binary' in params:
                out_fn = params['binary']
            else: # should not ever reach this
                raise AppArmorException("Could not determine output filename")

            # Generate an absolute path, convertng any path delimiters to '.'
            out_fn = os.path.join(dir, re.sub(r'/', '.', out_fn.lstrip('/')))
            if os.path.exists(out_fn):
                raise AppArmorException("'%s' already exists" % out_fn)

            if not os.path.exists(dir):
                os.mkdir(dir)
            if not os.path.isdir(dir):
                raise AppArmorException("'%s' is not a directory" % dir)

            # Write to a temp file first, then move into place.
            f, fn = tempfile.mkstemp(prefix='aa-easyprof')
            if not isinstance(policy, bytes):
                policy = policy.encode('utf-8')
            os.write(f, policy)
            os.close(f)

            shutil.move(fn, out_fn)

    def gen_manifest(self, params):
        '''Take params list and output a JSON manifest string describing the
        profile (keyed by profile name or binary path).'''
        d = dict()
        d['security'] = dict()
        d['security']['profiles'] = dict()

        pkey = ""
        if 'profile_name' in params:
            pkey = params['profile_name']
        elif 'binary' in params:
            # when profile_name is not specified, the binary (path attachment)
            # also functions as the profile name
            pkey = params['binary']
        else:
            raise AppArmorException("Must supply binary or profile name")

        d['security']['profiles'][pkey] = dict()

        # Add the template since it isn't part of 'params'
        template = os.path.basename(self.template)
        if template != 'default':
            d['security']['profiles'][pkey]['template'] = template

        # Add the policy_version since it isn't part of 'params'
        if self.policy_version:
            d['security']['profiles'][pkey]['policy_version'] = float(self.policy_version)
        if self.policy_vendor:
            d['security']['profiles'][pkey]['policy_vendor'] = self.policy_vendor

        for key in params:
            if key == 'profile_name' or \
               (key == 'binary' and not 'profile_name' in params):
                continue # don't re-add the pkey
            elif key == 'binary' and not params[key]:
                continue # binary can by None when specifying --profile-name
            elif key == 'template_var':
                d['security']['profiles'][pkey]['template_variables'] = dict()
                for tvar in params[key]:
                    if not self.gen_variable_declaration(tvar):
                        raise AppArmorException("Malformed template_var '%s'" % tvar)
                    # Strip the @{...} wrapper from the variable name.
                    (k, v) = tvar.split('=')
                    k = k.lstrip('@').lstrip('{').rstrip('}')
                    d['security']['profiles'][pkey]['template_variables'][k] = v
            elif key == 'abstractions' or key == 'policy_groups':
                d['security']['profiles'][pkey][key] = params[key].split(",")
                d['security']['profiles'][pkey][key].sort()
            else:
                d['security']['profiles'][pkey][key] = params[key]

        json_str = json.dumps(d,
                              sort_keys=True,
                              indent=2,
                              separators=(',', ': ')
                             )
        return json_str
def print_basefilenames(files):
    """Print only the basename of each path in *files*, one per line."""
    for path in files:
        sys.stdout.write("%s\n" % (os.path.basename(path)))
def print_files(files):
    """Dump the contents of each file in *files* to stdout, each followed
    by a newline."""
    for path in files:
        with open(path) as handle:
            sys.stdout.write(handle.read() + "\n")
def check_manifest_conflict_args(option, opt_str, value, parser):
    """optparse callback for -m/--manifest: reject it when any conflicting
    policy option was already supplied, otherwise store the value."""
    # template always gets defaulted to 'default', so it can never conflict.
    conflicting = ('abstractions',
                   'read_path',
                   'write_path',
                   'policy_groups',
                   'policy_version',
                   'policy_vendor',
                   'name',
                   'profile_name',
                   'comment',
                   'copyright',
                   'author',
                   'template_var')
    for opt_name in conflicting:
        if getattr(parser.values, opt_name, False):
            raise optparse.OptionValueError("can't use --%s with --manifest "
                                            "argument" % opt_name)
    setattr(parser.values, option.dest, value)
def check_for_manifest_arg(option, opt_str, value, parser):
    """optparse callback: store *value* unless --manifest was already given."""
    if parser.values.manifest:
        raise optparse.OptionValueError("can't use --%s with --manifest "
                                        "argument" % opt_str.lstrip('-'))
    setattr(parser.values, option.dest, value)
def check_for_manifest_arg_append(option, opt_str, value, parser):
    """optparse callback: append *value* to a list option unless --manifest
    was already given."""
    if parser.values.manifest:
        raise optparse.OptionValueError("can't use --%s with --manifest "
                                        "argument" % opt_str.lstrip('-'))
    parser.values.ensure_value(option.dest, []).append(value)
def add_parser_policy_args(parser):
    '''Add parser arguments.

    Registers all policy-related options on *parser*. Options that conflict
    with --manifest route through the check_for_manifest_arg* callbacks so
    the combination is rejected at parse time.
    '''
    parser.add_option("-a", "--abstractions",
                      action="callback",
                      callback=check_for_manifest_arg,
                      type=str,
                      dest="abstractions",
                      help="Comma-separated list of abstractions",
                      metavar="ABSTRACTIONS")
    parser.add_option("--read-path",
                      action="callback",
                      callback=check_for_manifest_arg_append,
                      type=str,
                      dest="read_path",
                      help="Path allowing owner reads",
                      metavar="PATH")
    parser.add_option("--write-path",
                      action="callback",
                      callback=check_for_manifest_arg_append,
                      type=str,
                      dest="write_path",
                      help="Path allowing owner writes",
                      metavar="PATH")
    parser.add_option("-t", "--template",
                      dest="template",
                      help="Use non-default policy template",
                      metavar="TEMPLATE",
                      default='default')
    parser.add_option("--templates-dir",
                      dest="templates_dir",
                      help="Use non-default templates directory",
                      metavar="DIR")
    parser.add_option("--include-templates-dir",
                      dest="include_templates_dir",
                      help="Also search DIR for templates",
                      metavar="DIR")
    # NOTE(review): no explicit dest here; optparse derives 'policy_groups'
    # from the long option name.
    parser.add_option("-p", "--policy-groups",
                      action="callback",
                      callback=check_for_manifest_arg,
                      type=str,
                      help="Comma-separated list of policy groups",
                      metavar="POLICYGROUPS")
    parser.add_option("--policy-groups-dir",
                      dest="policy_groups_dir",
                      help="Use non-default policy-groups directory",
                      metavar="DIR")
    parser.add_option("--include-policy-groups-dir",
                      dest="include_policy_groups_dir",
                      help="Also search DIR for policy groups",
                      metavar="DIR")
    parser.add_option("--policy-version",
                      action="callback",
                      callback=check_for_manifest_arg,
                      type=str,
                      dest="policy_version",
                      help="Specify version for templates and policy groups",
                      metavar="VERSION")
    parser.add_option("--policy-vendor",
                      action="callback",
                      callback=check_for_manifest_arg,
                      type=str,
                      dest="policy_vendor",
                      help="Specify vendor for templates and policy groups",
                      metavar="VENDOR")
    parser.add_option("--profile-name",
                      action="callback",
                      callback=check_for_manifest_arg,
                      type=str,
                      dest="profile_name",
                      help="Pyronia profile name",
                      metavar="PROFILENAME")
def parse_args(args=None, parser=None):
    '''Parse arguments.

    Builds the full option parser (unless one is supplied), parses *args*
    (defaults to sys.argv[1:]) and returns the (options, args) pair.
    Side effect: sets the module-global DEBUGGING flag when -d/--debug is
    given.
    '''
    global DEBUGGING
    # Fixed: compare to None with 'is', not '=='.
    if parser is None:
        parser = optparse.OptionParser()
    parser.add_option("-c", "--config-file",
                      dest="conffile",
                      help="Use alternate configuration file",
                      metavar="FILE")
    parser.add_option("-d", "--debug",
                      help="Show debugging output",
                      action='store_true',
                      default=False)
    parser.add_option("--no-verify",
                      help="Don't verify policy using 'apparmor_parser -p'",
                      action='store_true',
                      default=False)
    parser.add_option("--list-templates",
                      help="List available templates",
                      action='store_true',
                      default=False)
    parser.add_option("--show-template",
                      help="Show specified template",
                      action='store_true',
                      default=False)
    parser.add_option("--list-policy-groups",
                      help="List available policy groups",
                      action='store_true',
                      default=False)
    parser.add_option("--show-policy-group",
                      help="Show specified policy groups",
                      action='store_true',
                      default=False)
    # The value options below go through check_for_manifest_arg* so they
    # cannot be combined with -m/--manifest.
    parser.add_option("-n", "--name",
                      action="callback",
                      callback=check_for_manifest_arg,
                      type=str,
                      dest="name",
                      help="Name of policy (not AppArmor profile name)",
                      metavar="COMMENT")
    parser.add_option("--comment",
                      action="callback",
                      callback=check_for_manifest_arg,
                      type=str,
                      dest="comment",
                      help="Comment for policy",
                      metavar="COMMENT")
    parser.add_option("--author",
                      action="callback",
                      callback=check_for_manifest_arg,
                      type=str,
                      dest="author",
                      help="Author of policy",
                      metavar="COMMENT")
    parser.add_option("--copyright",
                      action="callback",
                      callback=check_for_manifest_arg,
                      type=str,
                      dest="copyright",
                      help="Copyright for policy",
                      metavar="COMMENT")
    parser.add_option("--template-var",
                      action="callback",
                      callback=check_for_manifest_arg_append,
                      type=str,
                      dest="template_var",
                      help="Declare AppArmor variable",
                      metavar="@{VARIABLE}=VALUE")
    parser.add_option("--output-format",
                      action="store",
                      dest="output_format",
                      help="Specify output format as text (default) or json",
                      metavar="FORMAT",
                      default="text")
    parser.add_option("--output-directory",
                      action="store",
                      dest="output_directory",
                      help="Output policy to this directory",
                      metavar="DIR")
    # This option conflicts with any of the value arguments, e.g. name,
    # author, template-var, etc.
    parser.add_option("-m", "--manifest",
                      action="callback",
                      callback=check_manifest_conflict_args,
                      type=str,
                      dest="manifest",
                      help="JSON manifest file",
                      metavar="FILE")
    parser.add_option("--verify-manifest",
                      action="store_true",
                      default=False,
                      dest="verify_manifest",
                      help="Verify JSON manifest file")
    # add policy args now
    add_parser_policy_args(parser)
    (my_opt, my_args) = parser.parse_args(args)
    if my_opt.debug:
        DEBUGGING = True
    return (my_opt, my_args)
def gen_policy_params(binary, opt):
    '''Build the keyword-argument dict for gen_policy() from parsed options.

    Raises AppArmorException when neither a binary nor a profile name is
    available to identify the policy.
    '''
    params = dict(binary=binary)
    if not binary and not opt.profile_name:
        raise AppArmorException("Must specify binary and/or profile name")
    if opt.profile_name:
        params['profile_name'] = opt.profile_name
    # The policy name falls back to the profile name, then to the
    # binary's basename.
    if opt.name:
        params['name'] = opt.name
    elif opt.profile_name:
        params['name'] = opt.profile_name
    elif binary:
        params['name'] = os.path.basename(binary)
    # Straight pass-through of any options the user actually set.
    for key in ('template_var', 'abstractions', 'policy_groups',
                'read_path', 'write_path', 'comment', 'author',
                'copyright'):
        value = getattr(opt, key)
        if value:
            params[key] = value
    # Version/vendor are only meaningful for JSON manifest output.
    if opt.output_format == "json":
        if opt.policy_version:
            params['policy_version'] = opt.policy_version
        if opt.policy_vendor:
            params['policy_vendor'] = opt.policy_vendor
    return params
def parse_manifest(manifest, opt_orig):
    '''Parse a JSON manifest string into a list of (binary, options) pairs.

    A manifest may describe several profiles; each entry yields its own
    deep copy of *opt_orig* with the manifest values applied.
    '''
    try:
        m = json.loads(manifest)
    except ValueError:
        raise AppArmorException("Could not parse manifest")
    top_table = m['security'] if 'security' in m else m
    if 'profiles' not in top_table:
        raise AppArmorException("Could not parse manifest (could not find 'profiles')")
    table = top_table['profiles']
    # generally mirrors what is settable in gen_policy_params()
    valid_keys = ['abstractions', 'author', 'binary', 'comment',
                  'copyright', 'name', 'policy_groups', 'policy_version',
                  'policy_vendor', 'profile_name', 'read_path', 'template',
                  'template_variables', 'write_path']
    profiles = []
    for profile_name in table:
        entry = table[profile_name]
        if not isinstance(entry, dict):
            raise AppArmorException("Wrong JSON structure")
        opt = copy.deepcopy(opt_orig)
        # A profile key that starts with '/' is itself the binary (path
        # attachment) and must not also carry a 'binary' key; otherwise
        # the key is the profile name and the binary, if any, comes from
        # the entry.
        binary = None
        if profile_name.startswith('/'):
            if 'binary' in entry:
                raise AppArmorException("Profile name should not specify path with binary")
            binary = profile_name
        else:
            setattr(opt, 'profile_name', profile_name)
            if 'binary' in entry:
                binary = entry['binary']
        setattr(opt, 'binary', binary)
        for key in entry:
            if key not in valid_keys:
                raise AppArmorException("Invalid key '%s'" % key)
            if key == 'binary':
                continue  # handled above
            elif key == 'abstractions' or key == 'policy_groups':
                # Lists in JSON, comma-joined strings on the options.
                setattr(opt, key, ",".join(entry[key]))
            elif key == "template_variables":
                tvars = entry['template_variables']
                decls = []
                for name in tvars.keys():
                    decls.append("@{%s}=%s" % (name, tvars[name]))
                setattr(opt, 'template_var', decls)
            else:
                if hasattr(opt, key):
                    setattr(opt, key, entry[key])
        profiles.append((binary, opt))
    return profiles
def verify_options(opt, strict=False):
    '''Make sure our options are valid.

    Raises AppArmorException on the first invalid value.  With
    strict=True, template names must be simple names (no paths).
    '''
    if hasattr(opt, 'binary') and opt.binary and not valid_path(opt.binary):
        raise AppArmorException("Invalid binary '%s'" % opt.binary)
    if hasattr(opt, 'profile_name') and opt.profile_name is not None and \
       not valid_profile_name(opt.profile_name):
        raise AppArmorException("Invalid profile name '%s'" % opt.profile_name)
    if hasattr(opt, 'binary') and opt.binary and \
       hasattr(opt, 'profile_name') and opt.profile_name is not None and \
       opt.profile_name.startswith('/'):
        raise AppArmorException("Profile name should not specify path with binary")
    if hasattr(opt, 'policy_vendor') and opt.policy_vendor and \
       not valid_policy_vendor(opt.policy_vendor):
        raise AppArmorException("Invalid policy vendor '%s'" % \
                                opt.policy_vendor)
    if hasattr(opt, 'policy_version') and opt.policy_version and \
       not valid_policy_version(opt.policy_version):
        raise AppArmorException("Invalid policy version '%s'" % \
                                opt.policy_version)
    if hasattr(opt, 'template') and opt.template and \
       not valid_template_name(opt.template, strict):
        raise AppArmorException("Invalid template '%s'" % opt.template)
    if hasattr(opt, 'template_var') and opt.template_var:
        for i in opt.template_var:
            if not valid_variable(i):
                raise AppArmorException("Invalid variable '%s'" % i)
    if hasattr(opt, 'policy_groups') and opt.policy_groups:
        for i in opt.policy_groups.split(','):
            if not valid_policy_group_name(i):
                raise AppArmorException("Invalid policy group '%s'" % i)
    if hasattr(opt, 'abstractions') and opt.abstractions:
        for i in opt.abstractions.split(','):
            if not valid_abstraction_name(i):
                raise AppArmorException("Invalid abstraction '%s'" % i)
    # BUG FIX: these checks previously tested the nonexistent attributes
    # 'read_paths'/'write_paths' (the optparse dests are 'read_path' and
    # 'write_path'), so user-supplied paths were never validated.
    # Paths may also start with an AppArmor variable (e.g. "@{HOME}/..."),
    # which gen_path_rule() accepts, so skip valid_path() for those.
    if hasattr(opt, 'read_path') and opt.read_path:
        for i in opt.read_path:
            if not i.startswith('@') and not valid_path(i):
                raise AppArmorException("Invalid read path '%s'" % i)
    if hasattr(opt, 'write_path') and opt.write_path:
        for i in opt.write_path:
            if not i.startswith('@') and not valid_path(i):
                raise AppArmorException("Invalid write path '%s'" % i)
def verify_manifest(params, args=None):
    '''Verify manifest for safe and unsafe options.

    Returns False (after warning) when the manifest contains anything a
    store/reviewer should not auto-approve: explicit read/write paths,
    path or wildcard profile names, non-'base' abstractions, or template
    variables containing globs or path traversal.
    '''
    err_str = ""
    (opt, args) = parse_args(args)
    # Instantiated only so gen_variable_declaration() below can be used
    # to sanity-check template variables.
    fake_easyp = AppArmorEasyProfile(None, opt)
    unsafe_keys = ['read_path', 'write_path']
    safe_abstractions = ['base']
    for k in params:
        debug("Examining %s=%s" % (k, params[k]))
        if k in unsafe_keys:
            err_str += "\nfound %s key" % k
        elif k == 'profile_name':
            # Absolute or wildcarded profile names could shadow/hijack
            # other profiles.
            if params['profile_name'].startswith('/') or \
               '*' in params['profile_name']:
                err_str += "\nprofile_name '%s'" % params['profile_name']
        elif k == 'abstractions':
            for a in params['abstractions'].split(','):
                if not a in safe_abstractions:
                    err_str += "\nfound '%s' abstraction" % a
        elif k == "template_var":
            # Variable values must not contain globs, braces, brackets,
            # path separators or '..' traversal.
            pat = re.compile(r'[*/\{\}\[\]]')
            for tv in params['template_var']:
                if not fake_easyp.gen_variable_declaration(tv):
                    err_str += "\n%s" % tv
                    continue
                tv_val = tv.split('=')[1]
                debug("Examining %s" % tv_val)
                if '..' in tv_val or pat.search(tv_val):
                    err_str += "\n%s" % tv
    if err_str:
        warn("Manifest definition is potentially unsafe%s" % err_str)
        return False
    return True
| # ------------------------------------------------------------------
#
# Copyright (C) 2011-2015 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License published by the Free Software Foundation.
#
# ------------------------------------------------------------------
from __future__ import with_statement
import codecs
import copy
import glob
import json
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
#
# TODO: move this out to the common library
#
#from apparmor import AppArmorException
class AppArmorException(Exception):
    '''This class represents AppArmor exceptions

    *value* holds the human-readable reason; str() returns its repr().
    '''
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
#
# End common
#
# Module-wide debug flag: parse_args() flips it to True for -d/--debug,
# and debug() checks it before writing anything.
DEBUGGING = False
#
# TODO: move this out to a utilities library
#
def error(out, exit_code=1, do_exit=True):
    '''Print an error message to stderr; exit with *exit_code* unless
    do_exit is False.'''
    try:
        sys.stderr.write("ERROR: %s\n" % out)
    except IOError:
        # A broken/closed stderr should not mask the original error.
        pass
    if do_exit:
        sys.exit(exit_code)
def warn(out):
    '''Print a warning message to stderr'''
    try:
        sys.stderr.write("WARN: %s\n" % out)
    except IOError:
        # Best effort only; never raise while warning.
        pass
def msg(out, output=sys.stdout):
    '''Print a message to *output* (defaults to sys.stdout).

    BUG FIX: the *output* argument was previously accepted but ignored —
    the function always wrote to sys.stdout.
    '''
    try:
        output.write("%s\n" % (out))
    except IOError:
        pass
def cmd(command):
    '''Run *command*; return [exit code, combined stdout+stderr].

    A missing or unexecutable binary yields exit code 127, mirroring the
    shell convention.
    '''
    debug(command)
    try:
        child = subprocess.Popen(command, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
    except OSError as ex:
        return [127, str(ex)]
    output = child.communicate()[0]
    return [child.returncode, output]
def cmd_pipe(command1, command2):
    '''Run command1 with its stdout piped into command2's stdin.

    Returns [exit code of command2, its stdout (None when not captured)];
    127 when either command cannot be started.
    '''
    try:
        producer = subprocess.Popen(command1, stdout=subprocess.PIPE)
        consumer = subprocess.Popen(command2, stdin=producer.stdout)
    except OSError as ex:
        return [127, str(ex)]
    out = consumer.communicate()[0]
    return [consumer.returncode, out]
def debug(out):
    '''Print a debug message when the module-level DEBUGGING flag is set'''
    if not DEBUGGING:
        return
    try:
        sys.stderr.write("DEBUG: %s\n" % out)
    except IOError:
        pass
def valid_binary_path(path):
    '''Check that *path* is a normalized absolute path to a non-symlink.

    A path that does not exist yet is accepted (it cannot be a symlink).
    '''
    try:
        abs_path = os.path.abspath(path)
    except Exception:
        debug("Could not find absolute path for binary")
        return False
    if path != abs_path:
        debug("Binary should use a normalized absolute path")
        return False
    if not os.path.exists(abs_path):
        return True
    if os.path.realpath(path) != abs_path:
        debug("Binary should not be a symlink")
        return False
    return True
def valid_variable(v):
    '''Validate a variable declaration of the form @{NAME}=value'''
    debug("Checking '%s'" % v)
    try:
        key, value = v.split('=')
    except Exception:
        # Not exactly one '=' separator.
        return False
    if not re.search(r'^@\{[a-zA-Z0-9_]+\}$', key):
        return False
    if '/' in value:
        # Path-like values may be relative only when they do not start
        # with '/'.
        if not valid_path(value, relative_ok=not value.startswith('/')):
            return False
    if '"' in value:
        # We quote values elsewhere, so embedded quotes are unsafe.
        return False
    return True
def valid_path(path, relative_ok=False):
    '''Check that *path* is safe: absolute (unless relative_ok), free of
    quotes and '../' escapes, and already normalized.'''
    reason = "Invalid path: %s" % (path)
    if not relative_ok and not path.startswith('/'):
        debug("%s (relative)" % (reason))
        return False
    if '"' in path:  # We double quote elsewhere
        debug("%s (quote)" % (reason))
        return False
    if '../' in path:
        debug("%s (../ path escape)" % (reason))
        return False
    try:
        normalized = os.path.normpath(path)
    except Exception:
        debug("%s (could not normalize)" % (reason))
        return False
    if normalized != path:
        debug("%s (normalized path != path (%s != %s))" % (reason, normalized, path))
        return False
    return True
def _is_safe(s):
    '''Match against a known-safe character set: alphanumerics plus
    '_', '-' and '.'; empty strings are rejected.'''
    return bool(re.search(r'^[a-zA-Z_0-9\-\.]+$', s))
def valid_policy_vendor(s):
    '''Verify the policy vendor (alphanumerics plus '_', '-', '.' only)'''
    return _is_safe(s)
def valid_policy_version(v):
    '''Verify the policy version: any value float() accepts that is a
    non-negative number (e.g. "1.0").

    Fixes: the previous `float(v) < 0` test let NaN through (NaN compares
    False to everything); `>= 0` rejects it.  Non-string/number inputs
    (TypeError) now also return False instead of raising.
    '''
    try:
        version = float(v)
    except (ValueError, TypeError):
        return False
    return version >= 0
def valid_template_name(s, strict=False):
    '''Verify the template name.

    Unless strict, a name starting with '/' is treated as a path and
    validated with valid_path(); otherwise only safe simple names pass.
    '''
    if not strict and s.startswith('/'):
        return valid_path(s)
    return _is_safe(s)
def valid_abstraction_name(s):
    '''Verify the abstraction name (alphanumerics plus '_', '-', '.' only)'''
    return _is_safe(s)
def valid_profile_name(s):
    '''Verify the profile name'''
    # A name beginning with '/' is a path attachment; validate as a path.
    if s.startswith('/'):
        return valid_path(s)
    # Otherwise: alphanumerics plus Debian-version characters and '_';
    # must start alphanumeric and (per this regex) be >= 2 characters.
    return bool(re.search(r'^[a-zA-Z0-9][a-zA-Z0-9_\+\-\.:~]+$', s))
def valid_policy_group_name(s):
    '''Verify policy group name (alphanumerics plus '_', '-', '.' only)'''
    return _is_safe(s)
def get_directory_contents(path):
    '''Return the sorted entries of *path*, or None for an invalid path'''
    if not valid_path(path):
        return None
    return sorted(glob.glob(path + "/*"))
def open_file_read(path):
    '''Open *path* read-only as UTF-8 and return the file object.

    Errors from codecs.open (missing file, permissions) propagate to the
    caller.  The previous try/except that merely re-raised added nothing
    and has been removed.
    '''
    return codecs.open(path, 'r', "UTF-8")
def verify_policy(policy):
    '''Verify that *policy* compiles with apparmor_parser.

    *policy* may be policy text or, when it is a single line that looks
    like a path, the path of an existing policy file.  Returns True when
    the parser accepts it — or when no parser is installed.
    '''
    exe = "/sbin/apparmor_parser"
    if not os.path.exists(exe):
        rc, exe = cmd(['which', 'apparmor_parser'])
        if rc != 0:
            warn("Could not find apparmor_parser. Skipping verify")
            return True
        # BUG FIX: cmd() returns raw stdout — decode bytes (Python 3) and
        # strip the trailing newline so the path is usable with Popen.
        if isinstance(exe, bytes):
            exe = exe.decode('utf-8')
        exe = exe.strip()
    fn = ""
    tmp = None
    # if policy starts with '/' and is one line, assume it is a path
    if len(policy.splitlines()) == 1 and valid_path(policy):
        fn = policy
    else:
        f, fn = tempfile.mkstemp(prefix='aa-easyprof')
        tmp = fn
        if not isinstance(policy, bytes):
            policy = policy.encode('utf-8')
        os.write(f, policy)
        os.close(f)
    rc, out = cmd([exe, '-QTK', fn])
    # BUG FIX: only remove the temp file we created; previously a
    # caller-supplied policy *file* was deleted too.
    if tmp is not None:
        os.unlink(tmp)
    return rc == 0
#
# End utility functions
#
class AppArmorEasyProfile:
    '''Easy profile class.

    Resolves the templates and policy-groups directories (from the
    easyprof.conf configuration plus command line overrides), validates
    the options, and generates AppArmor policy text or JSON manifests.
    '''
    def __init__(self, binary, opt):
        '''Validate *opt*, resolve template/policy-group directories and
        load the available templates and policy groups.

        *binary* may be None when a profile name is used instead.
        Raises AppArmorException on invalid options or missing
        directories.
        '''
        verify_options(opt)
        opt.ensure_value("conffile", "/etc/pyronia/easyprof.conf")
        self.conffile = os.path.abspath(opt.conffile)
        debug("Examining confile=%s" % (self.conffile))
        # Directory map: 'templates', 'policygroups' and the optional
        # '*_include' extra search directories.
        self.dirs = dict()
        if os.path.isfile(self.conffile):
            self._get_defaults()
        if opt.templates_dir and os.path.isdir(opt.templates_dir):
            self.dirs['templates'] = os.path.abspath(opt.templates_dir)
        elif not opt.templates_dir and \
             opt.template and \
             os.path.isfile(opt.template) and \
             valid_path(opt.template):
            # If we specified the template and it is an absolute path, just set
            # the templates directory to the parent of the template so we don't
            # have to require --template-dir with absolute paths.
            self.dirs['templates'] = os.path.abspath(os.path.dirname(opt.template))
        if opt.include_templates_dir and \
           os.path.isdir(opt.include_templates_dir):
            self.dirs['templates_include'] = os.path.abspath(opt.include_templates_dir)
        if opt.policy_groups_dir and os.path.isdir(opt.policy_groups_dir):
            self.dirs['policygroups'] = os.path.abspath(opt.policy_groups_dir)
        if opt.include_policy_groups_dir and \
           os.path.isdir(opt.include_policy_groups_dir):
            self.dirs['policygroups_include'] = os.path.abspath(opt.include_policy_groups_dir)
        self.policy_version = None
        self.policy_vendor = None
        if (opt.policy_version and not opt.policy_vendor) or \
           (opt.policy_vendor and not opt.policy_version):
            raise AppArmorException("Must specify both policy version and vendor")
        # If specified --policy-version and --policy-vendor, use
        # templates_dir/policy_vendor/policy_version
        if opt.policy_version and opt.policy_vendor:
            self.policy_vendor = opt.policy_vendor
            self.policy_version = str(opt.policy_version)
            for i in ['templates', 'policygroups']:
                d = os.path.join(self.dirs[i], \
                                 self.policy_vendor, \
                                 self.policy_version)
                if not os.path.isdir(d):
                    raise AppArmorException(
                        "Could not find %s directory '%s'" % (i, d))
                self.dirs[i] = d
        if not 'templates' in self.dirs:
            raise AppArmorException("Could not find templates directory")
        if not 'policygroups' in self.dirs:
            raise AppArmorException("Could not find policygroups directory")
        self.aa_topdir = "/etc/pyronia.d"
        self.binary = binary
        if binary:
            if not valid_binary_path(binary):
                raise AppArmorException("Invalid path for binary: '%s'" % binary)
        # Manifest-supplied templates may not use absolute paths.
        if opt.manifest:
            self.set_template(opt.template, allow_abs_path=False)
        else:
            self.set_template(opt.template)
        self.set_policygroup(opt.policy_groups)
        if opt.name:
            self.set_name(opt.name)
        elif self.binary != None:
            self.set_name(self.binary)
        # Collect available template files: system directory first, then
        # any include directory (skipping duplicates).
        self.templates = []
        for f in get_directory_contents(self.dirs['templates']):
            if os.path.isfile(f):
                self.templates.append(f)
        if 'templates_include' in self.dirs:
            for f in get_directory_contents(self.dirs['templates_include']):
                if os.path.isfile(f) and f not in self.templates:
                    self.templates.append(f)
        # Same collection scheme for policy group files.
        self.policy_groups = []
        for f in get_directory_contents(self.dirs['policygroups']):
            if os.path.isfile(f):
                self.policy_groups.append(f)
        if 'policygroups_include' in self.dirs:
            for f in get_directory_contents(self.dirs['policygroups_include']):
                if os.path.isfile(f) and f not in self.policy_groups:
                    self.policy_groups.append(f)
    def _get_defaults(self):
        '''Read in defaults (TEMPLATES_DIR, POLICYGROUPS_DIR) from the
        configuration file into self.dirs.'''
        if not os.path.exists(self.conffile):
            raise AppArmorException("Could not find '%s'" % self.conffile)
        # Read in the configuration
        f = open_file_read(self.conffile)
        # Only consider KEY="value" style lines.
        pat = re.compile(r'^\w+=".*"?')
        for line in f:
            if not pat.search(line):
                continue
            if line.startswith("POLICYGROUPS_DIR="):
                d = re.split(r'=', line.strip())[1].strip('["\']')
                self.dirs['policygroups'] = d
            elif line.startswith("TEMPLATES_DIR="):
                d = re.split(r'=', line.strip())[1].strip('["\']')
                self.dirs['templates'] = d
        f.close()
        keys = self.dirs.keys()
        if 'templates' not in keys:
            raise AppArmorException("Could not find TEMPLATES_DIR in '%s'" % self.conffile)
        if 'policygroups' not in keys:
            raise AppArmorException("Could not find POLICYGROUPS_DIR in '%s'" % self.conffile)
        for k in self.dirs.keys():
            if not os.path.isdir(self.dirs[k]):
                raise AppArmorException("Could not find '%s'" % self.dirs[k])
    def set_name(self, name):
        '''Set name of policy'''
        self.name = name
    def get_template(self):
        '''Get contents of current template'''
        return open(self.template).read()
    def set_template(self, template, allow_abs_path=True):
        '''Set current template.

        *template* is either an absolute path (when allow_abs_path) or a
        name looked up in the templates directories.
        '''
        if "../" in template:
            raise AppArmorException('template "%s" contains "../" escape path' % (template))
        elif template.startswith('/') and not allow_abs_path:
            raise AppArmorException("Cannot use an absolute path template '%s'" % template)
        # If have an abs path, just use it
        if template.startswith('/'):
            if not os.path.exists(template):
                raise AppArmorException('%s does not exist' % (template))
            self.template = template
            return
        # Find the template since we don't have an abs path
        sys_t = os.path.join(self.dirs['templates'], template)
        inc_t = None
        if 'templates_include' in self.dirs:
            inc_t = os.path.join(self.dirs['templates_include'], template)
        if os.path.exists(sys_t):
            self.template = sys_t
        elif inc_t is not None and os.path.exists(inc_t):
            self.template = inc_t
        else:
            raise AppArmorException('%s does not exist' % (template))
    def get_templates(self):
        '''Get list of all available templates by filename'''
        return self.templates
    def get_policygroup(self, policygroup):
        '''Get contents of specific policygroup'''
        p = policygroup
        # Resolve a bare name against the policygroups directories.
        if not p.startswith('/'):
            sys_p = os.path.join(self.dirs['policygroups'], p)
            inc_p = None
            if 'policygroups_include' in self.dirs:
                inc_p = os.path.join(self.dirs['policygroups_include'], p)
            if os.path.exists(sys_p):
                p = sys_p
            elif inc_p is not None and os.path.exists(inc_p):
                p = inc_p
        if self.policy_groups == None or not p in self.policy_groups:
            raise AppArmorException("Policy group '%s' does not exist" % p)
        return open(p).read()
    def set_policygroup(self, policygroups):
        '''Set policygroups from a comma-separated string of names and/or
        absolute paths (None clears the list).'''
        self.policy_groups = []
        if policygroups != None:
            for p in policygroups.split(','):
                # If have abs path, just use it
                if p.startswith('/'):
                    if not os.path.exists(p):
                        raise AppArmorException('%s does not exist' % (p))
                    self.policy_groups.append(p)
                    continue
                # Find the policy group since we don't have an abs path
                sys_p = os.path.join(self.dirs['policygroups'], p)
                inc_p = None
                if 'policygroups_include' in self.dirs:
                    inc_p = os.path.join(self.dirs['policygroups_include'], p)
                if os.path.exists(sys_p):
                    self.policy_groups.append(sys_p)
                elif inc_p is not None and os.path.exists(inc_p):
                    self.policy_groups.append(inc_p)
                else:
                    raise AppArmorException('%s does not exist' % (p))
    def get_policy_groups(self):
        '''Get list of all policy groups by filename'''
        return self.policy_groups
    def gen_abstraction_rule(self, abstraction):
        '''Generate an abstraction rule'''
        p = os.path.join(self.aa_topdir, "abstractions", abstraction)
        if not os.path.exists(p):
            raise AppArmorException("%s does not exist" % p)
        return "#include <abstractions/%s>" % abstraction
    def gen_variable_declaration(self, dec):
        '''Generate a variable declaration'''
        if not valid_variable(dec):
            raise AppArmorException("Invalid variable declaration '%s'" % dec)
        # Make sure we always quote
        k, v = dec.split('=')
        return '%s="%s"' % (k, v)
    def gen_path_rule(self, path, access):
        '''Generate AppArmor path rules for *path* with *access* perms.

        Directory paths also get a recursive '**' rule; paths under
        /home or @{HOME...} get the 'owner' modifier.
        '''
        rule = []
        if not path.startswith('/') and not path.startswith('@'):
            raise AppArmorException("'%s' should not be relative path" % path)
        owner = ""
        if path.startswith('/home/') or path.startswith("@{HOME"):
            owner = "owner "
        if path.endswith('/'):
            rule.append("%s %s," % (path, access))
            rule.append("%s%s** %s," % (owner, path, access))
        elif path.endswith('/**') or path.endswith('/*'):
            # Also grant access to the containing directory itself.
            rule.append("%s %s," % (os.path.dirname(path), access))
            rule.append("%s%s %s," % (owner, path, access))
        else:
            rule.append("%s%s %s," % (owner, path, access))
        return rule
    def gen_policy(self, name,
                   binary=None,
                   profile_name=None,
                   template_var=[],
                   abstractions=None,
                   policy_groups=None,
                   read_path=[],
                   write_path=[],
                   author=None,
                   comment=None,
                   copyright=None,
                   no_verify=False):
        '''Render the current template into a complete policy and (unless
        no_verify) check it with apparmor_parser.

        NOTE(review): template_var/read_path/write_path use mutable list
        defaults and are sorted in place below, which mutates the
        caller's lists — confirm this side effect is acceptable.
        '''
        def find_prefix(t, s):
            '''Calculate whitespace prefix based on occurrence of s in t'''
            pat = re.compile(r'^ *%s' % s)
            p = ""
            for line in t.splitlines():
                if pat.match(line):
                    p = " " * (len(line) - len(line.lstrip()))
                    break
            return p
        policy = self.get_template()
        # Strip the template's usage header (everything up to and
        # including the ###ENDUSAGE### marker line).
        if '###ENDUSAGE###' in policy:
            found = False
            tmp = ""
            for line in policy.splitlines():
                if not found:
                    if line.startswith('###ENDUSAGE###'):
                        found = True
                    continue
                tmp += line + "\n"
            policy = tmp
        attachment = ""
        if binary:
            if not valid_binary_path(binary):
                raise AppArmorException("Invalid path for binary: '%s'" % \
                                        binary)
            if profile_name:
                attachment = 'profile "%s" "%s"' % (profile_name, binary)
            else:
                attachment = '"%s"' % binary
        elif profile_name:
            attachment = 'profile "%s"' % profile_name
        else:
            raise AppArmorException("Must specify binary and/or profile name")
        policy = re.sub(r'###PROFILEATTACH###', attachment, policy)
        policy = re.sub(r'###NAME###', name, policy)
        # Fill-in various comment fields
        if comment != None:
            policy = re.sub(r'###COMMENT###', "Comment: %s" % comment, policy)
        if author != None:
            policy = re.sub(r'###AUTHOR###', "Author: %s" % author, policy)
        if copyright != None:
            policy = re.sub(r'###COPYRIGHT###', "Copyright: %s" % copyright, policy)
        # Fill-in rules and variables with proper indenting
        search = '###ABSTRACTIONS###'
        prefix = find_prefix(policy, search)
        s = "%s# No abstractions specified" % prefix
        if abstractions != None:
            s = "%s# Specified abstractions" % (prefix)
            t = abstractions.split(',')
            t.sort()
            for i in t:
                s += "\n%s%s" % (prefix, self.gen_abstraction_rule(i))
        policy = re.sub(r' *%s' % search, s, policy)
        search = '###POLICYGROUPS###'
        prefix = find_prefix(policy, search)
        s = "%s# No policy groups specified" % prefix
        if policy_groups != None:
            s = "%s# Rules specified via policy groups" % (prefix)
            t = policy_groups.split(',')
            t.sort()
            for i in t:
                for line in self.get_policygroup(i).splitlines():
                    s += "\n%s%s" % (prefix, line)
                # Blank line between policy groups (not after the last).
                if i != policy_groups.split(',')[-1]:
                    s += "\n"
        policy = re.sub(r' *%s' % search, s, policy)
        search = '###VAR###'
        prefix = find_prefix(policy, search)
        s = "%s# No template variables specified" % prefix
        if len(template_var) > 0:
            s = "%s# Specified profile variables" % (prefix)
            template_var.sort()
            for i in template_var:
                s += "\n%s%s" % (prefix, self.gen_variable_declaration(i))
        policy = re.sub(r' *%s' % search, s, policy)
        search = '###READS###'
        prefix = find_prefix(policy, search)
        s = "%s# No read paths specified" % prefix
        if len(read_path) > 0:
            s = "%s# Specified read permissions" % (prefix)
            read_path.sort()
            for i in read_path:
                for r in self.gen_path_rule(i, 'rk'):
                    s += "\n%s%s" % (prefix, r)
        policy = re.sub(r' *%s' % search, s, policy)
        search = '###WRITES###'
        prefix = find_prefix(policy, search)
        s = "%s# No write paths specified" % prefix
        if len(write_path) > 0:
            s = "%s# Specified write permissions" % (prefix)
            write_path.sort()
            for i in write_path:
                for r in self.gen_path_rule(i, 'rwk'):
                    s += "\n%s%s" % (prefix, r)
        policy = re.sub(r' *%s' % search, s, policy)
        if no_verify:
            debug("Skipping policy verification")
        elif not verify_policy(policy):
            msg("\n" + policy)
            raise AppArmorException("Invalid policy")
        return policy
    def output_policy(self, params, count=0, dir=None):
        '''Output policy to stdout, or into directory *dir* as a file
        named after the profile name/binary (with '/' mapped to '.').'''
        policy = self.gen_policy(**params)
        if not dir:
            if count:
                sys.stdout.write('### aa-easyprof profile #%d ###\n' % count)
            sys.stdout.write('%s\n' % policy)
        else:
            out_fn = ""
            if 'profile_name' in params:
                out_fn = params['profile_name']
            elif 'binary' in params:
                out_fn = params['binary']
            else: # should not ever reach this
                raise AppArmorException("Could not determine output filename")
            # Generate an absolute path, converting any path delimiters to '.'
            out_fn = os.path.join(dir, re.sub(r'/', '.', out_fn.lstrip('/')))
            if os.path.exists(out_fn):
                raise AppArmorException("'%s' already exists" % out_fn)
            if not os.path.exists(dir):
                os.mkdir(dir)
            if not os.path.isdir(dir):
                raise AppArmorException("'%s' is not a directory" % dir)
            # Write to a temp file first, then move into place.
            f, fn = tempfile.mkstemp(prefix='aa-easyprof')
            if not isinstance(policy, bytes):
                policy = policy.encode('utf-8')
            os.write(f, policy)
            os.close(f)
            shutil.move(fn, out_fn)
    def gen_manifest(self, params):
        '''Take params list and output a JSON manifest string.

        The profile key is the profile name when given, otherwise the
        binary path; template, policy_version and policy_vendor come
        from this object rather than *params*.
        '''
        d = dict()
        d['security'] = dict()
        d['security']['profiles'] = dict()
        pkey = ""
        if 'profile_name' in params:
            pkey = params['profile_name']
        elif 'binary' in params:
            # when profile_name is not specified, the binary (path attachment)
            # also functions as the profile name
            pkey = params['binary']
        else:
            raise AppArmorException("Must supply binary or profile name")
        d['security']['profiles'][pkey] = dict()
        # Add the template since it isn't part of 'params'
        template = os.path.basename(self.template)
        if template != 'default':
            d['security']['profiles'][pkey]['template'] = template
        # Add the policy_version since it isn't part of 'params'
        if self.policy_version:
            d['security']['profiles'][pkey]['policy_version'] = float(self.policy_version)
        if self.policy_vendor:
            d['security']['profiles'][pkey]['policy_vendor'] = self.policy_vendor
        for key in params:
            if key == 'profile_name' or \
               (key == 'binary' and not 'profile_name' in params):
                continue # don't re-add the pkey
            elif key == 'binary' and not params[key]:
                continue # binary can be None when specifying --profile-name
            elif key == 'template_var':
                # Convert "@{NAME}=value" declarations back to a mapping.
                d['security']['profiles'][pkey]['template_variables'] = dict()
                for tvar in params[key]:
                    if not self.gen_variable_declaration(tvar):
                        raise AppArmorException("Malformed template_var '%s'" % tvar)
                    (k, v) = tvar.split('=')
                    k = k.lstrip('@').lstrip('{').rstrip('}')
                    d['security']['profiles'][pkey]['template_variables'][k] = v
            elif key == 'abstractions' or key == 'policy_groups':
                d['security']['profiles'][pkey][key] = params[key].split(",")
                d['security']['profiles'][pkey][key].sort()
            else:
                d['security']['profiles'][pkey][key] = params[key]
        json_str = json.dumps(d,
                              sort_keys=True,
                              indent=2,
                              separators=(',', ': ')
                              )
        return json_str
def print_basefilenames(files):
    '''Print the basename of each entry in *files*, one per line'''
    for path in files:
        sys.stdout.write("%s\n" % os.path.basename(path))
def print_files(files):
    '''Dump the contents of each file in *files* to stdout'''
    for path in files:
        with open(path) as f:
            sys.stdout.write(f.read() + "\n")
def check_manifest_conflict_args(option, opt_str, value, parser):
    '''optparse callback for -m/--manifest: reject it when any of the
    per-policy value arguments has already been given.'''
    # 'template' is always set (default 'default'), so it can never
    # conflict and is deliberately absent from this list.
    conflicts = ('abstractions', 'read_path', 'write_path',
                 'policy_groups', 'policy_version', 'policy_vendor',
                 'name', 'profile_name', 'comment', 'copyright', 'author',
                 'template_var')
    for conflict in conflicts:
        if getattr(parser.values, conflict, False):
            raise optparse.OptionValueError("can't use --%s with --manifest "
                                            "argument" % conflict)
    setattr(parser.values, option.dest, value)
def check_for_manifest_arg(option, opt_str, value, parser):
    '''optparse callback: refuse a value argument once --manifest was
    already given, otherwise store it normally.'''
    if parser.values.manifest:
        arg = opt_str.lstrip('-')
        raise optparse.OptionValueError(
            "can't use --%s with --manifest argument" % arg)
    setattr(parser.values, option.dest, value)
def check_for_manifest_arg_append(option, opt_str, value, parser):
    '''optparse callback: like check_for_manifest_arg, but appends the
    value to a list option instead of storing a scalar.'''
    if parser.values.manifest:
        arg = opt_str.lstrip('-')
        raise optparse.OptionValueError(
            "can't use --%s with --manifest argument" % arg)
    parser.values.ensure_value(option.dest, []).append(value)
def add_parser_policy_args(parser):
    '''Register the policy-generation options on *parser*.

    Options whose values conflict with -m/--manifest are routed through
    the check_for_manifest_arg* callbacks so that mixing them with a
    manifest is rejected at parse time.
    '''
    # Shared kwargs for single-valued and appending manifest-guarded options.
    scalar = dict(action="callback", callback=check_for_manifest_arg,
                  type=str)
    multi = dict(action="callback", callback=check_for_manifest_arg_append,
                 type=str)
    parser.add_option("-a", "--abstractions", dest="abstractions",
                      help="Comma-separated list of abstractions",
                      metavar="ABSTRACTIONS", **scalar)
    parser.add_option("--read-path", dest="read_path",
                      help="Path allowing owner reads",
                      metavar="PATH", **multi)
    parser.add_option("--write-path", dest="write_path",
                      help="Path allowing owner writes",
                      metavar="PATH", **multi)
    parser.add_option("-t", "--template", dest="template",
                      help="Use non-default policy template",
                      metavar="TEMPLATE", default='default')
    parser.add_option("--templates-dir", dest="templates_dir",
                      help="Use non-default templates directory",
                      metavar="DIR")
    parser.add_option("--include-templates-dir",
                      dest="include_templates_dir",
                      help="Also search DIR for templates",
                      metavar="DIR")
    parser.add_option("-p", "--policy-groups", dest="policy_groups",
                      help="Comma-separated list of policy groups",
                      metavar="POLICYGROUPS", **scalar)
    parser.add_option("--policy-groups-dir", dest="policy_groups_dir",
                      help="Use non-default policy-groups directory",
                      metavar="DIR")
    parser.add_option("--include-policy-groups-dir",
                      dest="include_policy_groups_dir",
                      help="Also search DIR for policy groups",
                      metavar="DIR")
    parser.add_option("--policy-version", dest="policy_version",
                      help="Specify version for templates and policy groups",
                      metavar="VERSION", **scalar)
    parser.add_option("--policy-vendor", dest="policy_vendor",
                      help="Specify vendor for templates and policy groups",
                      metavar="VENDOR", **scalar)
    parser.add_option("--profile-name", dest="profile_name",
                      help="Pyronia profile name",
                      metavar="PROFILENAME", **scalar)
def parse_args(args=None, parser=None):
'''Parse arguments'''
global DEBUGGING
if parser == None:
parser = optparse.OptionParser()
parser.add_option("-c", "--config-file",
dest="conffile",
help="Use alternate configuration file",
metavar="FILE")
parser.add_option("-d", "--debug",
help="Show debugging output",
action='store_true',
default=False)
parser.add_option("--no-verify",
help="Don't verify policy using 'apparmor_parser -p'",
action='store_true',
default=False)
parser.add_option("--list-templates",
help="List available templates",
action='store_true',
default=False)
parser.add_option("--show-template",
help="Show specified template",
action='store_true',
default=False)
parser.add_option("--list-policy-groups",
help="List available policy groups",
action='store_true',
default=False)
parser.add_option("--show-policy-group",
help="Show specified policy groups",
action='store_true',
default=False)
parser.add_option("-n", "--name",
action="callback",
callback=check_for_manifest_arg,
type=str,
dest="name",
help="Name of policy (not AppArmor profile name)",
metavar="COMMENT")
parser.add_option("--comment",
action="callback",
callback=check_for_manifest_arg,
type=str,
dest="comment",
help="Comment for policy",
metavar="COMMENT")
parser.add_option("--author",
action="callback",
callback=check_for_manifest_arg,
type=str,
dest="author",
help="Author of policy",
metavar="COMMENT")
parser.add_option("--copyright",
action="callback",
callback=check_for_manifest_arg,
type=str,
dest="copyright",
help="Copyright for policy",
metavar="COMMENT")
parser.add_option("--template-var",
action="callback",
callback=check_for_manifest_arg_append,
type=str,
dest="template_var",
help="Declare AppArmor variable",
metavar="@{VARIABLE}=VALUE")
parser.add_option("--output-format",
action="store",
dest="output_format",
help="Specify output format as text (default) or json",
metavar="FORMAT",
default="text")
parser.add_option("--output-directory",
action="store",
dest="output_directory",
help="Output policy to this directory",
metavar="DIR")
# This option conflicts with any of the value arguments, e.g. name,
# author, template-var, etc.
parser.add_option("-m", "--manifest",
action="callback",
callback=check_manifest_conflict_args,
type=str,
dest="manifest",
help="JSON manifest file",
metavar="FILE")
parser.add_option("--verify-manifest",
action="store_true",
default=False,
dest="verify_manifest",
help="Verify JSON manifest file")
# add policy args now
add_parser_policy_args(parser)
(my_opt, my_args) = parser.parse_args(args)
if my_opt.debug:
DEBUGGING = True
return (my_opt, my_args)
def gen_policy_params(binary, opt):
'''Generate parameters for gen_policy'''
params = dict(binary=binary)
if not binary and not opt.profile_name:
raise AppArmorException("Must specify binary and/or profile name")
if opt.profile_name:
params['profile_name'] = opt.profile_name
if opt.name:
params['name'] = opt.name
else:
if opt.profile_name:
params['name'] = opt.profile_name
elif binary:
params['name'] = os.path.basename(binary)
if opt.template_var: # What about specified multiple times?
params['template_var'] = opt.template_var
if opt.abstractions:
params['abstractions'] = opt.abstractions
if opt.policy_groups:
params['policy_groups'] = opt.policy_groups
if opt.read_path:
params['read_path'] = opt.read_path
if opt.write_path:
params['write_path'] = opt.write_path
if opt.comment:
params['comment'] = opt.comment
if opt.author:
params['author'] = opt.author
if opt.copyright:
params['copyright'] = opt.copyright
if opt.policy_version and opt.output_format == "json":
params['policy_version'] = opt.policy_version
if opt.policy_vendor and opt.output_format == "json":
params['policy_vendor'] = opt.policy_vendor
return params
def parse_manifest(manifest, opt_orig):
'''Take a JSON manifest as a string and updates options, returning an
updated binary. Note that a JSON file may contain multiple profiles.'''
try:
m = json.loads(manifest)
except ValueError:
raise AppArmorException("Could not parse manifest")
if 'security' in m:
top_table = m['security']
else:
top_table = m
if 'profiles' not in top_table:
raise AppArmorException("Could not parse manifest (could not find 'profiles')")
table = top_table['profiles']
# generally mirrors what is settable in gen_policy_params()
valid_keys = ['abstractions',
'author',
'binary',
'comment',
'copyright',
'name',
'policy_groups',
'policy_version',
'policy_vendor',
'profile_name',
'read_path',
'template',
'template_variables',
'write_path',
]
profiles = []
for profile_name in table:
if not isinstance(table[profile_name], dict):
raise AppArmorException("Wrong JSON structure")
opt = copy.deepcopy(opt_orig)
# The JSON structure is:
# {
# "security": {
# <profile_name>: {
# "binary": ...
# ...
# but because binary can be the profile name, we need to handle
# 'profile_name' and 'binary' special. If a profile_name starts with
# '/', then it is considered the binary. Otherwise, set the
# profile_name and set the binary if it is in the JSON.
binary = None
if profile_name.startswith('/'):
if 'binary' in table[profile_name]:
raise AppArmorException("Profile name should not specify path with binary")
binary = profile_name
else:
setattr(opt, 'profile_name', profile_name)
if 'binary' in table[profile_name]:
binary = table[profile_name]['binary']
setattr(opt, 'binary', binary)
for key in table[profile_name]:
if key not in valid_keys:
raise AppArmorException("Invalid key '%s'" % key)
if key == 'binary':
continue # handled above
elif key == 'abstractions' or key == 'policy_groups':
setattr(opt, key, ",".join(table[profile_name][key]))
elif key == "template_variables":
t = table[profile_name]['template_variables']
vlist = []
for v in t.keys():
vlist.append("@{%s}=%s" % (v, t[v]))
setattr(opt, 'template_var', vlist)
else:
if hasattr(opt, key):
setattr(opt, key, table[profile_name][key])
profiles.append( (binary, opt) )
return profiles
def verify_options(opt, strict=False):
'''Make sure our options are valid'''
if hasattr(opt, 'binary') and opt.binary and not valid_path(opt.binary):
raise AppArmorException("Invalid binary '%s'" % opt.binary)
if hasattr(opt, 'profile_name') and opt.profile_name != None and \
not valid_profile_name(opt.profile_name):
raise AppArmorException("Invalid profile name '%s'" % opt.profile_name)
if hasattr(opt, 'binary') and opt.binary and \
hasattr(opt, 'profile_name') and opt.profile_name != None and \
opt.profile_name.startswith('/'):
raise AppArmorException("Profile name should not specify path with binary")
if hasattr(opt, 'policy_vendor') and opt.policy_vendor and \
not valid_policy_vendor(opt.policy_vendor):
raise AppArmorException("Invalid policy vendor '%s'" % \
opt.policy_vendor)
if hasattr(opt, 'policy_version') and opt.policy_version and \
not valid_policy_version(opt.policy_version):
raise AppArmorException("Invalid policy version '%s'" % \
opt.policy_version)
if hasattr(opt, 'template') and opt.template and \
not valid_template_name(opt.template, strict):
raise AppArmorException("Invalid template '%s'" % opt.template)
if hasattr(opt, 'template_var') and opt.template_var:
for i in opt.template_var:
if not valid_variable(i):
raise AppArmorException("Invalid variable '%s'" % i)
if hasattr(opt, 'policy_groups') and opt.policy_groups:
for i in opt.policy_groups.split(','):
if not valid_policy_group_name(i):
raise AppArmorException("Invalid policy group '%s'" % i)
if hasattr(opt, 'abstractions') and opt.abstractions:
for i in opt.abstractions.split(','):
if not valid_abstraction_name(i):
raise AppArmorException("Invalid abstraction '%s'" % i)
if hasattr(opt, 'read_paths') and opt.read_paths:
for i in opt.read_paths:
if not valid_path(i):
raise AppArmorException("Invalid read path '%s'" % i)
if hasattr(opt, 'write_paths') and opt.write_paths:
for i in opt.write_paths:
if not valid_path(i):
raise AppArmorException("Invalid write path '%s'" % i)
def verify_manifest(params, args=None):
'''Verify manifest for safe and unsafe options'''
err_str = ""
(opt, args) = parse_args(args)
fake_easyp = AppArmorEasyProfile(None, opt)
unsafe_keys = ['read_path', 'write_path']
safe_abstractions = ['base']
for k in params:
debug("Examining %s=%s" % (k, params[k]))
if k in unsafe_keys:
err_str += "\nfound %s key" % k
elif k == 'profile_name':
if params['profile_name'].startswith('/') or \
'*' in params['profile_name']:
err_str += "\nprofile_name '%s'" % params['profile_name']
elif k == 'abstractions':
for a in params['abstractions'].split(','):
if not a in safe_abstractions:
err_str += "\nfound '%s' abstraction" % a
elif k == "template_var":
pat = re.compile(r'[*/\{\}\[\]]')
for tv in params['template_var']:
if not fake_easyp.gen_variable_declaration(tv):
err_str += "\n%s" % tv
continue
tv_val = tv.split('=')[1]
debug("Examining %s" % tv_val)
if '..' in tv_val or pat.search(tv_val):
err_str += "\n%s" % tv
if err_str:
warn("Manifest definition is potentially unsafe%s" % err_str)
return False
return True | en | 0.644632 | # ------------------------------------------------------------------ # # Copyright (C) 2011-2015 Canonical Ltd. # # This program is free software; you can redistribute it and/or # modify it under the terms of version 2 of the GNU General Public # License published by the Free Software Foundation. # # ------------------------------------------------------------------ # # TODO: move this out to the common library # #from apparmor import AppArmorException This class represents AppArmor exceptions # # End common # # # TODO: move this out to a utilities library # Print error message and exit Print warning message Print message Try to execute the given command. Try to pipe command1 into command2. Print debug message Validate name Validate variable name # If we made it here, we are safe Valid path # We double quote elsewhere # If we made it here, we are safe Known safe regex Verify the policy vendor Verify the policy version Verify the template name Verify the template name Verify the profile name # profile name specifies path # profile name does not specify path # alpha-numeric and Debian version, plus '_' Verify policy group name Find contents of the given directory Open specified file read-only Verify policy compiles # if policy starts with '/' and is one line, assume it is a path # # End utility functions # Easy profile class # If we specified the template and it is an absolute path, just set # the templates directory to the parent of the template so we don't # have to require --template-dir with absolute paths. 
# If specified --policy-version and --policy-vendor, use # templates_dir/policy_vendor/policy_version Read in defaults from configuration # Read in the configuration Set name of policy Get contents of current template Set current template # If have an abs path, just use it # Find the template since we don't have an abs path Get list of all available templates by filename Get contents of specific policygroup Set policygroups # If have abs path, just use it # Find the policy group since we don't have and abs path Get list of all policy groups by filename Generate an abstraction rule Generate a variable declaration # Make sure we always quote Calculate whitespace prefix based on occurrence of s in t ##ENDUSAGE###' in policy: ##ENDUSAGE###'): ##PROFILEATTACH###', attachment, policy) ##NAME###', name, policy) # Fill-in various comment fields ##COMMENT###', "Comment: %s" % comment, policy) ##AUTHOR###', "Author: %s" % author, policy) ##COPYRIGHT###', "Copyright: %s" % copyright, policy) # Fill-in rules and variables with proper indenting ##ABSTRACTIONS###' # No abstractions specified" % prefix # Specified abstractions" % (prefix) ##POLICYGROUPS###' # No policy groups specified" % prefix # Rules specified via policy groups" % (prefix) ##VAR###' # No template variables specified" % prefix # Specified profile variables" % (prefix) ##READS###' # No read paths specified" % prefix # Specified read permissions" % (prefix) ##WRITES###' # No write paths specified" % prefix # Specified write permissions" % (prefix) Output policy ## aa-easyprof profile #%d ###\n' % count) # should not ever reach this # Generate an absolute path, convertng any path delimiters to '.' 
Take params list and output a JSON file # when profile_name is not specified, the binary (path attachment) # also functions as the profile name # Add the template since it isn't part of 'params' # Add the policy_version since it isn't part of 'params' # don't re-add the pkey # binary can by None when specifying --profile-name Check for -m/--manifest with conflicting args # template always get set to 'default', can't conflict # 'template', Check for -m/--manifest with conflicting args Check for -m/--manifest with conflicting args (with append) Add parser arguments Parse arguments # This option conflicts with any of the value arguments, e.g. name, # author, template-var, etc. # add policy args now Generate parameters for gen_policy # What about specified multiple times? Take a JSON manifest as a string and updates options, returning an updated binary. Note that a JSON file may contain multiple profiles. # generally mirrors what is settable in gen_policy_params() # The JSON structure is: # { # "security": { # <profile_name>: { # "binary": ... # ... # but because binary can be the profile name, we need to handle # 'profile_name' and 'binary' special. If a profile_name starts with # '/', then it is considered the binary. Otherwise, set the # profile_name and set the binary if it is in the JSON. # handled above Make sure our options are valid Verify manifest for safe and unsafe options | 2.216025 | 2 |
setup.py | urosjevremovic/Belgrade-real-estate-app-adventisrealestate | 0 | 6619965 | <reponame>urosjevremovic/Belgrade-real-estate-app-adventisrealestate
"""
BelgradeRealEstateAppAdventisrealestate
-------------
Script that runs a web scraper in a background and gets
all available real estates in Belgrade, and filters them
by given parameters.
You can get it by downloading it directly or by typing:
$ pip install BelgradeRealEstateAppAdventisrealestate
After it is installed you can start it by simply typing in your terminal:
$ belgrade_real_estates_adventis
Results will be printed in terminal window, and saved into CSV
file for easier browsing.
"""
from setuptools import setup
setup(name='BelgradeRealEstateAppAdventisrealestate',
version='0.8',
description='Script that runs a web scraper in a background and gets all available real estates in Belgrade, '
'and filters them by given parameters.',
long_description=__doc__,
long_description_content_type='text/markdown',
url="https://github.com/urosjevremovic/Belgrade-real-estate-app-adventisrealestate",
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
packages=['RealEstateApp'],
install_requires=['bs4', 'requests'],
entry_points={
"console_scripts": ["belgrade_real_estates_adventis=RealEstateApp.real_estate_app:main"],
},
)
__author__ = '<NAME>'
| """
BelgradeRealEstateAppAdventisrealestate
-------------
Script that runs a web scraper in a background and gets
all available real estates in Belgrade, and filters them
by given parameters.
You can get it by downloading it directly or by typing:
$ pip install BelgradeRealEstateAppAdventisrealestate
After it is installed you can start it by simply typing in your terminal:
$ belgrade_real_estates_adventis
Results will be printed in terminal window, and saved into CSV
file for easier browsing.
"""
from setuptools import setup
setup(name='BelgradeRealEstateAppAdventisrealestate',
version='0.8',
description='Script that runs a web scraper in a background and gets all available real estates in Belgrade, '
'and filters them by given parameters.',
long_description=__doc__,
long_description_content_type='text/markdown',
url="https://github.com/urosjevremovic/Belgrade-real-estate-app-adventisrealestate",
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
packages=['RealEstateApp'],
install_requires=['bs4', 'requests'],
entry_points={
"console_scripts": ["belgrade_real_estates_adventis=RealEstateApp.real_estate_app:main"],
},
)
__author__ = '<NAME>' | en | 0.796216 | BelgradeRealEstateAppAdventisrealestate ------------- Script that runs a web scraper in a background and gets all available real estates in Belgrade, and filters them by given parameters. You can get it by downloading it directly or by typing: $ pip install BelgradeRealEstateAppAdventisrealestate After it is installed you can start it by simply typing in your terminal: $ belgrade_real_estates_adventis Results will be printed in terminal window, and saved into CSV file for easier browsing. | 2.486828 | 2 |
fmri/run_scan.py | DevXl/dd-odc | 0 | 6619966 | <gh_stars>0
#!usr/bin/env python
"""
Created at 10/23/21
@author: <NAME>
fMRI experiment for finding the location of attentional feedback in V1
"""
from psychopy import visual, monitors, event, core, logging, gui, data
from mr_helpers import setup_path, get_monitors
import numpy as np
from pathlib import Path
import pandas as pd
from itertools import product
from collections import defaultdict
# =========================================================================== #
# --------------------------------------------------------------------------- #
# -------------------------------- ! SETUP ---------------------------------- #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# Get the parameters from gui
# setup
# Experimenter dialog: debug toggle, participant/session info, and
# run-specific experiment parameters.
input_gui = gui.Dlg(title=">_<")
input_gui.addField('Debug: ', True, color='red')
input_gui.addText('Participant Information', color='blue')
input_gui.addField('Participant Number: ', choices=list(range(1, 25)))
input_gui.addField('Initials: ')
input_gui.addField('DBIC ID: ')
input_gui.addField('Accession Number: ')
input_gui.addField('Session: ', choices=[1, 2])
input_gui.addField('Age: ', choices=list(range(18, 81)))
input_gui.addField('Vision: ', choices=["Normal", "Corrected", "Other"])
input_gui.addText("Experiment Parameters", color='blue')
input_gui.addField("Path orientation: ", 10)
input_gui.addField("Path length: ")
input_gui.addField("Initial Eye:", choices=["Left", "Right"])
input_gui.addFixedField("Date: ", data.getDateStr())
# show
# part_info holds field values in the order the fields were added
# (assumes addText rows contribute no entry -- TODO confirm):
# 0=Debug, 1=Participant Number, 2=Initials, 3=DBIC ID, 4=Accession Number,
# 5=Session, 6=Age, 7=Vision, 8=Path orientation, 9=Path length,
# 10=Initial Eye, 11=Date
part_info = input_gui.show()
if not input_gui.OK:
    # experimenter pressed Cancel -> abort before any window/files are created
    core.quit()
else:
    print(part_info)
# check debug
debug = part_info[0]
if not debug:
    sub_id = int(part_info[1])
    sub_init = part_info[2]
else:
    # debug mode uses a fixed dummy participant
    sub_init = 'gg'
    sub_id = 0
# "Left"/"Right"; only referenced from commented-out run-eye logic below so far
init_eye = part_info[10]
date = part_info[11]
# Directories and files
# Identifiers baked into the BIDS-style output file names.
EXP = "DoubleDriftODC"
PART = "fmri"
TASK = "contrast_change"
ROOTDIR = Path(__file__).resolve().parent.parent  # repository root (parent of this file's directory)
PARTDIR = setup_path(sub_id, ROOTDIR, PART)  # per-subject output directory (see mr_helpers.setup_path)
run_file = PARTDIR / f"sub-{sub_id:02d}_task-{TASK}_part-{PART}_exp-{EXP}"
# file names
exp_file = str(run_file)  # base name for the trial-data CSV (".csv" appended at save time)
frames_file = str(run_file) + "_frame-intervals.log"  # per-frame interval record
log_file = str(run_file) + "_runtime-log.log"  # runtime log output
# Monitor
# mon_specs keys used in this script: size_cm, size_px, dist, refresh_rate
mon_name = 'RaZer'
mon_specs = get_monitors(mon_name)
exp_mon = monitors.Monitor(name=mon_name, width=mon_specs["size_cm"][0], distance=mon_specs["dist"])
exp_mon.setSizePix(mon_specs["size_px"])
exp_mon.save()  # persist the calibration so psychopy can look it up by name
# debugging variables
# Debug mode runs windowed at a small size and auto-generates responses
# (fake_ans); real runs go full-screen at the monitor's native resolution.
if int(debug):
    mon_size = [1024, 768]
    full_screen = False
    fake_ans = True
else:
    mon_size = mon_specs["size_px"]
    full_screen = True
    fake_ans = False
# Window
exp_win = visual.Window(
    monitor=exp_mon,
    fullscr=full_screen,
    units='deg',  # all stimulus sizes/positions below are degrees of visual angle
    size=mon_size,
    allowGUI=False,
    screen=0,
    autoLog=False,
    name='behav_win'
)
# Logging
# Timestamps are relative to log_clock; the file receives INFO and above,
# the console only ERROR and above.
log_clock = core.Clock()
logging.setDefaultClock(log_clock)
log_data = logging.LogFile(log_file, filemode='w', level=logging.INFO)
logging.console.setLevel(logging.ERROR)
# Add a new logging level name called bids
# we will use this level to log information that will be saved
# in the _events.tsv file for this run
BIDS = 69  # custom numeric log level reserved for BIDS event rows
logging.addLevel(BIDS, 'BIDS')
# BIDS TEMPLATE
# Header row for the events file, then the per-event format template.
# NOTE(review): template_bids is not referenced in the visible code -- confirm usage.
logging.root.log("onset\tduration\ttask_side\teye", level=BIDS)
template_bids = '{onset:.3f}\t{duration:.3f}\t{hemifield}\t{eye}'
# =========================================================================== #
# --------------------------------------------------------------------------- #
# ------------------------------ ! STIMULUS --------------------------------- #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# gabor
# Two drifting Gabors, one per hemifield, mirrored about fixation.
horiz_offset = 7  # horizontal eccentricity of each gabor (dva)
left_gabor = visual.GratingStim(
    win=exp_win,
    mask='gauss',
    pos=[-horiz_offset, 0],
    contrast=1,
    interpolate=False,
    autoLog=False
)
right_gabor = visual.GratingStim(
    win=exp_win,
    mask='gauss',
    pos=[horiz_offset, 0],
    contrast=1,
    interpolate=False,
    autoLog=False
)
# NOTE(review): the trial loop further down references a bare `gabor`, which is
# never defined -- it presumably should use left_gabor/right_gabor per the cued side.
# checkerboards
# One vertical column of stacked squares per (side, pattern, orientation)
# combination, keyed "side_pattern_orientation" in `checkers`.
sqr_sz = 3   # square side length (dva)
n_sqrs = 8   # squares per column
stim_sides = ["left", "right"]
patterns = ["pat1", "pat2"]
oris = ["vert", "obl"]
checkers = defaultdict(list)
for lr, pat, ori in product(stim_sides, patterns, oris):
    x_pos = -horiz_offset if lr == "left" else horiz_offset
    tilt = 0 if ori == "vert" else int(part_info[8])
    for s in range(n_sqrs):
        # alternate luminance along the column; pat2 is the inverse of pat1
        shade = 1 if s % 2 else -1
        if pat != "pat1":
            shade = -shade
        checkers[f"{lr}_{pat}_{ori}"].append(
            visual.Rect(
                win=exp_win,
                size=sqr_sz,
                pos=[x_pos, sqr_sz / 2 + s],
                lineColor=0,
                ori=tilt,
                fillColor=shade,
                autoLog=False
            )
        )
# cue
# Arrow image shown at fixation indicating the to-be-attended hemifield
# (per the instruction text below).
cue = visual.ImageStim(
    win=exp_win,
    image='arrow.png',  # NOTE(review): relative path -- resolved against CWD, not this file
    autoLog=False
)
# fixation dot
fix = visual.Circle(
    win=exp_win,
    radius=0.1,
    fillColor='black',
    size=.3,
    autoLog=False
)
# Text messages
# msg_stim: full instruction text; rep_stim: short prompt drawn below fixation
msg_stim = visual.TextStim(win=exp_win, wrapWidth=30, height=.8, alignText='left', autoLog=False)
rep_stim = visual.TextStim(win=exp_win, wrapWidth=30, height=.8, pos=[0, -5], autoLog=False)
# =========================================================================== #
# --------------------------------------------------------------------------- #
# ------------------------------ ! PROCEDURE -------------------------------- #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# Instructions
# Runtime strings shown to the participant -- do not edit casually.
instr_msg = \
    "On each trial, maintain fixation at the center of the screen on the inner circle.\n\n" \
    "A red arrow will appear at fixation that indicates which side of the screen you should pay attention to for all " \
    "the subsequent trials.\n\n" \
    "When the arrow disappears, maintain fixation and pay attention to the moving Gabor on the cued side and ignore " \
    "the other side of the screen.\n\n" \
    "If the gabor on the cued side becomes dimmer, press the response button!\n\n"\
    "Press the response button to start the experiment..."
end_msg = "Thank you for your participation :)"
# Conditions
hemifields = ["L", "R"]  # target left or right side of the fixation
# trial_types = ["dd", "ctrl_vert", "ctrl_oblq"]  # is it a double- or single-drift
trial_types = ["dd"]  # only double-drift for now; controls kept commented above
eyes = ["L", "R"]  # left eye viewing or right eye: first 5 runs are one eye and the last 5 are the other
# NOTE(review): the comment above says 5 runs per eye but n_runs is 8 -- confirm.
block_parts = ["cue", "fix", "stim"]  # NOTE(review): only referenced from dead/commented code below
n_blocks = 12  # each block has an initial 4s wait period followed by 11s of stimulus presentation and 15s fixation
n_runs = 8  # number of runs
run_per_cond = 2  # repetitions of each condition when expanding the trial matrix
# Data handler
# columns of experiment dataframe
cols = [
    "HEMIFIELD",      # cued side ("L"/"R")
    "TRIAL_TYPE",     # "dd" or a control condition
    "DIM",            # whether the target dimmed (filled at runtime)
    "DIM_TIME",       # time of dimming (filled at runtime)
    "TRIAL",          # trial number
    "EYE",            # viewing eye ("L"/"R")
    "BLOCK",          # block number
    "RUN",            # run number
    "PATH_LEN",       # double-drift path length (from GUI)
    "PATH_ORI",       # double-drift path orientation (from GUI)
    "TASK",
    "EXPERIMENT",
    "SUB_ID",
    "SUB_INITIALS",
    "DBIC_ID",
    "ACCESSION_NUM"
]
# blocks and trials
# Build the session's trial matrix: one row per trial, columns matching `cols`.
# Everything is stored as strings in one numpy array and converted to a
# DataFrame at save time.
# Fixes relative to the original draft:
#   * the accumulator was initialized as `exp_runs` while all later code reads
#     `exp_blocks` (NameError);
#   * conds tuples are (trial_type, eye, hemifield) -- the original unpacked
#     them as (hemi, trial_type, eye), scrambling three columns;
#   * trial labels were written to column 6 (BLOCK) instead of 4 (TRIAL);
#   * the assert and the label range assumed n_trials rows, but only
#     len(conds) * run_per_cond rows are generated, guaranteeing a crash.
exp_blocks = None  # accumulated trial matrix
# every combination of trial type x viewing eye x cued hemifield
conds = list(product(trial_types, eyes, hemifields))
n_trials = n_runs * n_blocks * len(conds)  # total for the intended runs-x-blocks design
# NOTE(review): the original contained a dead nested loop over
# eyes/hemifields/runs/blocks (it built `row` and discarded it) plus a large
# commented-out sketch of per-run eye/hemifield assignment. That unfinished
# runs-x-blocks expansion is removed here; until it is implemented, the matrix
# below holds one shuffled repetition of the condition list
# (len(conds) * run_per_cond rows), not n_trials rows.
rows = None
for trial_type, eye, hemi in conds:  # unpack in the order product() emits them
    row = np.array([
        hemi,           # HEMIFIELD: cued side
        trial_type,     # TRIAL_TYPE
        np.NaN,         # DIM: does the target dim (filled at runtime)
        np.NaN,         # DIM_TIME: time of dimming (filled at runtime)
        np.NaN,         # TRIAL: numbered below
        eye,            # EYE: viewing eye
        np.NaN,         # BLOCK: not assigned yet
        np.NaN,         # RUN: not assigned yet
        np.NaN,         # PATH_LEN
        np.NaN,         # PATH_ORI
        TASK,           # task
        EXP,            # experiment
        sub_id,         # subject ID
        sub_init,       # subject initials
        part_info[3],   # DBIC ID
        part_info[4]    # Accession Number
    ])
    rows = row if rows is None else np.vstack((rows, row))
# repeat each condition run_per_cond times, then shuffle trial order
this_block = np.repeat(rows, run_per_cond, axis=0)
np.random.shuffle(this_block)  # shuffles rows only; within-row order preserved
# sanity check: column count must match the dataframe header
assert this_block.shape[1] == len(cols)
# label trials 1..k in the TRIAL column (index 4 in `cols`)
this_block[:, 4] = np.arange(1, this_block.shape[0] + 1)
exp_blocks = this_block
# =========================================================================== #
# --------------------------------------------------------------------------- #
# -------------------------------- ! RUN ------------------------------------ #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# Initialize runtime parameters
# gabor properties -- applied to BOTH hemifield gabors. The original assigned a
# bare `gabor`, which is never defined in this file and would raise NameError.
gab_size = 1.2  # gabor envelope size (dva)
gab_shift = 7  # horizontal offset from screen center (dva); mirrors horiz_offset above
gab_sf = .5 / gab_size  # spatial frequency (cycles/deg): half a cycle per envelope
for _gab in (left_gabor, right_gabor):
    _gab.size = gab_size
    _gab.sf = gab_sf
# runtime params
# allow ~3 ms slack beyond one refresh before a frame counts as dropped
exp_win.refreshThreshold = (1 / mon_specs["refresh_rate"]) + 0.003
path_dur = 1000  # duration of one motion sweep (ms)
# frames per sweep; int() (not np.floor, which returns a float) so the value
# can feed range() in the trial loop below
n_frames = int(path_dur * int(mon_specs["refresh_rate"]) / 1000)
# clocks
exp_clock = core.Clock()
# show instructions and wait for the start keypress
msg_stim.text = instr_msg
msg_stim.draw()
exp_win.flip()
event.waitKeys(keyList=['space'])
logging.exp("===========================")
logging.exp("Experiment started")
logging.exp("===========================")
exp_win.flip()
exp_clock.reset()
# start trials
# Main trial loop: drift presentation followed by a mouse-wheel report.
# NOTE(review): this section appears carried over from a behavioral double-drift
# task and does not run as-is -- see the inline notes on undefined names and
# column indices.
for trial in range(n_trials):
    # log it
    logging.exp(f"---------------------------")
    logging.exp(f"Trial {trial} started.")
    # run parameters
    # from Hz to cycles/frame
    # NOTE(review): per `cols`, columns 1 and 2 of exp_blocks are TRIAL_TYPE and
    # DIM (string / NaN), not numeric speeds -- this division will fail.
    v_tex = [exp_blocks[trial, 1] / mon_specs["refresh_rate"], 0]
    v_env = [0, exp_blocks[trial, 2] / mon_specs["refresh_rate"]]
    # start recording frames
    exp_win.recordFrameIntervals = True
    # show fixation
    fix.autoDraw = True
    # log it
    logging.exp("Moving the stimulus.")
    # show the drift
    # two sweeps: outward on the first repetition, back on the second
    for rep in range(2):
        # control drawing every frame
        for frame in range(n_frames):
            # drift right / move up on odd repetitions
            # NOTE(review): `gabor` is never defined in this file; the stimuli
            # are left_gabor/right_gabor.
            if not rep:
                gabor.phase += v_tex
                gabor.pos += v_env
            # drift left / move down on even repetitions
            else:
                gabor.phase -= v_tex
                gabor.pos -= v_env
            gabor.draw()
            exp_win.flip()
    # Get the response
    # clean buffer
    event.clearEvents()
    # reporting device: mouse wheel, pointer hidden
    ans_mouse = event.Mouse(visible=False, win=exp_win)
    # initialize response line length and ori every trial
    # NOTE(review): resp_line and resp_stages are never defined in this file
    # (presumably from the behavioral version of the task).
    resp_line.size = .1 if resp_stages[0] == 'Length' else 1  # starting line size with 1 dva
    resp_line.ori = 90
    # for debugging
    if not fake_ans:
        # text message
        rep_stim.autoDraw = True
        # reporting stages: adjust orientation then length (or vice versa)
        for stage in resp_stages:
            # get response
            resp = True
            # reporting loop
            while resp:
                # mouse wheel for controlling the line
                wheel_dX, wheel_dY = ans_mouse.getWheelRel()
                # change the orientation
                if stage == 'Orientation':
                    # text message
                    rep_stim.text = "Match orientation"
                    rep_stim.draw()
                    if 0 <= resp_line.ori < 180:  # don't allow weird responses
                        resp_line.setOri(wheel_dY * 2, '-')
                    else:
                        resp_line.ori = 0
                # change the length
                else:
                    # text message
                    rep_stim.text = "Match length"
                    rep_stim.draw()
                    if resp_line.size > 0:  # don't allow below 0 size!
                        resp_line.size += wheel_dY * .05
                    else:
                        resp_line.size = .1
                resp_line.draw()
                exp_win.flip()
                # get the keypress for end of reporting
                keys = event.getKeys()
                for key in keys:
                    # space is the end of reporting
                    if key == 'space':
                        # save the orientation
                        # NOTE(review): per `cols`, columns 4 and 5 are TRIAL
                        # and EYE, not response fields -- confirm intent.
                        if stage == 'Orientation':
                            exp_blocks[trial, 4] = resp_line.ori
                        # save the length
                        else:
                            # sometimes .size returns an np array!
                            try:
                                # the default response line size=1 means 2dva
                                exp_blocks[trial, 5] = np.round(resp_line.size, 2)
                            except:
                                exp_blocks[trial, 5] = np.round(resp_line.size[0], 2)
                        # log and end reporting
                        logging.exp(f"Response recorded: {exp_blocks[trial, 4]}")
                        resp = False  # end reporting
                        rep_stim.autoDraw = False
                    # escape is quitting
                    elif key == 'escape':  # quit button
                        logging.error("Aborted experiment.")
                        exp_win.close()
                        core.quit()
    # if in debugging mode
    else:
        # just generate fake responses
        exp_win.flip()
        core.wait(4)  # approximate response time
        exp_blocks[trial, 4] = .33  # random orientation
        exp_blocks[trial, 5] = .69  # random size
    # Turn fixation off
    fix.autoDraw = False
    # clear buffer
    event.clearEvents()
    logging.exp(f"Trial ended.")
# =========================================================================== #
# --------------------------------------------------------------------------- #
# -------------------------------- ! WRAP UP -------------------------------- #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# Wrap up: report duration, thank the participant, persist data, shut down.
elapsed_min = np.round(exp_clock.getTime() / 60, 2)
finish_note = f"Experiment finished. Duration: {elapsed_min} minutes."
logging.exp(finish_note)
print(finish_note)
# thank-you screen for a couple of seconds
msg_stim.text = end_msg
msg_stim.draw()
exp_win.flip()
core.wait(2)
exp_win.logOnFlip("Experiment ended.", level=logging.EXP)
exp_win.flip()
# persist the trial matrix as CSV
pd.DataFrame(exp_blocks, columns=cols).to_csv(exp_file + '.csv', sep=',', index=False)
# persist the recorded frame intervals, then tear down the window
exp_win.saveFrameIntervals(fileName=frames_file)
exp_win.close()
core.quit()
| #!usr/bin/env python
"""
Created at 10/23/21
@author: <NAME>
fMRI experiment for finding the location of attentional feedback in V1
"""
from psychopy import visual, monitors, event, core, logging, gui, data
from mr_helpers import setup_path, get_monitors
import numpy as np
from pathlib import Path
import pandas as pd
from itertools import product
from collections import defaultdict
# =========================================================================== #
# --------------------------------------------------------------------------- #
# -------------------------------- ! SETUP ---------------------------------- #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# Get the parameters from gui
# setup
# Session-information dialog. The addField order below fixes the indices
# used to read `part_info` later in this script: 0 Debug, 1 Participant
# Number, 2 Initials, 3 DBIC ID, 4 Accession Number, 5 Session, 6 Age,
# 7 Vision, 8 Path orientation, 9 Path length, 10 Initial Eye, 11 Date
# (addText entries are labels only and do not appear in part_info,
# consistent with how indices 10/11 are read back below).
input_gui = gui.Dlg(title=">_<")
input_gui.addField('Debug: ', True, color='red')
input_gui.addText('Participant Information', color='blue')
input_gui.addField('Participant Number: ', choices=list(range(1, 25)))
input_gui.addField('Initials: ')
input_gui.addField('DBIC ID: ')
input_gui.addField('Accession Number: ')
input_gui.addField('Session: ', choices=[1, 2])
input_gui.addField('Age: ', choices=list(range(18, 81)))
input_gui.addField('Vision: ', choices=["Normal", "Corrected", "Other"])
input_gui.addText("Experiment Parameters", color='blue')
input_gui.addField("Path orientation: ", 10)
input_gui.addField("Path length: ")
input_gui.addField("Initial Eye:", choices=["Left", "Right"])
input_gui.addFixedField("Date: ", data.getDateStr())
# show
# Blocks until the experimenter confirms or cancels; cancel quits the script.
part_info = input_gui.show()
if not input_gui.OK:
    core.quit()
else:
    print(part_info)
# check debug
# part_info indices follow the dialog field order defined in the GUI setup:
# 0 Debug flag, 1 Participant Number, 2 Initials, 10 Initial Eye, 11 Date.
debug = part_info[0]
if not debug:
    sub_id = int(part_info[1])
    sub_init = part_info[2]
else:
    # Debug sessions use fixed placeholder subject credentials.
    sub_init = 'gg'
    sub_id = 0
init_eye = part_info[10]
date = part_info[11]
# Directories and files
# Experiment/participant/task identifiers used to build BIDS-style filenames.
EXP = "DoubleDriftODC"
PART = "fmri"
TASK = "contrast_change"
ROOTDIR = Path(__file__).resolve().parent.parent # find the current file
# setup_path (mr_helpers) resolves the participant directory for this
# subject/part -- presumably creating it if missing; confirm in mr_helpers.
PARTDIR = setup_path(sub_id, ROOTDIR, PART)
run_file = PARTDIR / f"sub-{sub_id:02d}_task-{TASK}_part-{PART}_exp-{EXP}"
# file names
exp_file = str(run_file) # behavioural results; '.csv' is appended on save
frames_file = str(run_file) + "_frame-intervals.log" # per-frame timing record
log_file = str(run_file) + "_runtime-log.log" # runtime log
# Monitor
# Physical monitor profile; width/distance/resolution come from the lab's
# monitor registry in mr_helpers.get_monitors.
mon_name = 'RaZer'
mon_specs = get_monitors(mon_name)
exp_mon = monitors.Monitor(name=mon_name, width=mon_specs["size_cm"][0], distance=mon_specs["dist"])
exp_mon.setSizePix(mon_specs["size_px"])
exp_mon.save()
# debugging variables
# Debug runs use a small non-fullscreen window and auto-generated responses.
if int(debug):
    mon_size = [1024, 768]
    full_screen = False
    fake_ans = True
else:
    mon_size = mon_specs["size_px"]
    full_screen = True
    fake_ans = False
# Window
# Main stimulus window; all stimulus sizes/positions below are in degrees
# of visual angle ('deg').
exp_win = visual.Window(
    monitor=exp_mon,
    fullscr=full_screen,
    units='deg',
    size=mon_size,
    allowGUI=False,
    screen=0,
    autoLog=False,
    name='behav_win'
)
# Logging
log_clock = core.Clock()
logging.setDefaultClock(log_clock)
# File log captures INFO and above; console only shows errors.
log_data = logging.LogFile(log_file, filemode='w', level=logging.INFO)
logging.console.setLevel(logging.ERROR)
# Add a new logging level name called bids
# we will use this level to log information that will be saved
# in the _events.tsv file for this run
BIDS = 69 # custom numeric logging level reserved for BIDS event rows
logging.addLevel(BIDS, 'BIDS')
# BIDS TEMPLATE
# Header row of the BIDS events log; rows are meant to be formatted with
# template_bids.
logging.root.log("onset\tduration\ttask_side\teye", level=BIDS)
# NOTE(review): template_bids is never used in this file as written --
# presumably intended for event logging inside the trial loop; confirm.
template_bids = '{onset:.3f}\t{duration:.3f}\t{hemifield}\t{eye}'
# =========================================================================== #
# --------------------------------------------------------------------------- #
# ------------------------------ ! STIMULUS --------------------------------- #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# gabor
# Two Gabor patches mirrored 7 dva left/right of fixation.
# NOTE(review): the RUN section later assigns size/sf to a bare `gabor`
# name rather than to these two objects -- see the note there.
horiz_offset = 7
left_gabor = visual.GratingStim(
    win=exp_win,
    mask='gauss',
    pos=[-horiz_offset, 0],
    contrast=1,
    interpolate=False,
    autoLog=False
)
right_gabor = visual.GratingStim(
    win=exp_win,
    mask='gauss',
    pos=[horiz_offset, 0],
    contrast=1,
    interpolate=False,
    autoLog=False
)
# checkerboards
# One vertical stack of contrast-alternating squares per combination of
# side (left/right of fixation) x pattern phase (pat1/pat2, contrast-
# reversed relative to each other) x orientation (vert/obl), keyed as
# "<side>_<pattern>_<ori>".
sqr_sz = 3 # square size in dva
n_sqrs = 8 # squares per stack
stim_sides = ["left", "right"]
patterns = ["pat1", "pat2"]
oris = ["vert", "obl"]
checkers = defaultdict(list)
for lr in stim_sides:
    for pat in patterns:
        for ori in oris:
            for s in range(n_sqrs):
                checkers[f"{lr}_{pat}_{ori}"].append(
                    visual.Rect(
                        win=exp_win,
                        size=sqr_sz,
                        # stacked upward from y = sqr_sz / 2 in 1-dva steps
                        # NOTE(review): with size 3 and 1-dva spacing the
                        # squares overlap; confirm intended step (sqr_sz * s?).
                        pos=[-horiz_offset if lr == "left" else horiz_offset, sqr_sz / 2 + s],
                        lineColor=0,
                        # oblique tilt comes from the "Path orientation" GUI field
                        ori=0 if (ori == "vert") else int(part_info[8]),
                        # alternate +1/-1 fill; pat2 is the reversed phase of pat1
                        fillColor=(1 if s % 2 else -1) if (pat == "pat1") else (-1 if s % 2 else 1),
                        autoLog=False
                    )
                )
# cue
# Attention cue: an arrow image shown at fixation.
# NOTE(review): `cue` is never drawn in this file as written -- presumably
# the cue phase of the trial loop is still to be implemented; confirm.
cue = visual.ImageStim(
    win=exp_win,
    image='arrow.png',
    autoLog=False
)
# fixation dot
fix = visual.Circle(
    win=exp_win,
    radius=0.1,
    fillColor='black',
    size=.3,
    autoLog=False
)
# Text messages
# msg_stim: centred instruction text; rep_stim: response prompt below fixation.
msg_stim = visual.TextStim(win=exp_win, wrapWidth=30, height=.8, alignText='left', autoLog=False)
rep_stim = visual.TextStim(win=exp_win, wrapWidth=30, height=.8, pos=[0, -5], autoLog=False)
# =========================================================================== #
# --------------------------------------------------------------------------- #
# ------------------------------ ! PROCEDURE -------------------------------- #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# Instructions
# Participant-facing instruction text shown before the first trial.
# NOTE(review): the text tells the participant to press the response button,
# but the code below waits for the 'space' key -- confirm the button mapping.
instr_msg = \
    "On each trial, maintain fixation at the center of the screen on the inner circle.\n\n" \
    "A red arrow will appear at fixation that indicates which side of the screen you should pay attention to for all " \
    "the subsequent trials.\n\n" \
    "When the arrow disappears, maintain fixation and pay attention to the moving Gabor on the cued side and ignore " \
    "the other side of the screen.\n\n" \
    "If the gabor on the cued side becomes dimmer, press the response button!\n\n"\
    "Press the response button to start the experiment..."
end_msg = "Thank you for your participation :)"
# Conditions of the experimental design.
# Which side of fixation is cued on a given block.
hemifields = list("LR")
# Only the double-drift condition is active; the single-drift controls
# ("ctrl_vert", "ctrl_oblq") are currently disabled.
trial_types = ["dd"]
# Viewing eye; half of the runs use one eye, the other half the other eye.
eyes = list("LR")
# Phases that make up one block.
block_parts = ["cue", "fix", "stim"]
# Each block: initial 4 s wait, 11 s stimulus presentation, 15 s fixation.
n_blocks = 12
# Number of runs.
n_runs = 8
# Repetitions of each condition combination.
run_per_cond = 2
# Data handler
# Columns of the experiment dataframe. One entry per column of the trial
# matrix assembled below; order matters because rows are built positionally.
cols = (
    "HEMIFIELD TRIAL_TYPE DIM DIM_TIME TRIAL EYE BLOCK RUN "
    "PATH_LEN PATH_ORI TASK EXPERIMENT SUB_ID SUB_INITIALS "
    "DBIC_ID ACCESSION_NUM"
).split()
# blocks and trials
# Accumulator for the stacked trial matrix; the construction code further
# down tests `exp_blocks is None` before the first np.vstack, so this is
# the name that must be initialised here. (The original `exp_runs = None`
# left `exp_blocks` undefined and raised a NameError at that check.)
exp_blocks = None # trial matrix, later wrapped into a pandas dataframe
# in the speed block, speeds are varied. In the duration block, durations are varied.
conds = list(product(trial_types, eyes, hemifields))
n_trials = n_runs * n_blocks * len(conds) # total number of trials in the blocks
# loop through conditions, make every permutation, and save it to a numpy array
run_cnt = 0
# NOTE(review): work-in-progress block design. This loop rebuilds `row` on
# every innermost iteration and then discards it (nothing is ever appended),
# so it currently has no effect; its 18-element row layout also does not
# match the 16-entry `cols` list above. It appears to be superseded by the
# conds-based construction below -- confirm before deleting.
for eye in eyes:
    for hemi in hemifields:
        for run in range(run_per_cond):
            for block in range(n_blocks):
                for trial_type in block_parts:
                    row = np.array([
                        eye,
                        hemi, # target hemifield
                        run + 1,
                        block + 1,
                        trial_type, # cue, stim, or fixation
                        np.NaN, # does the target dim
                        np.NaN, # time of dimming
                        np.NaN, # trial
                        np.NaN, # block
                        np.NaN, # run
                        np.NaN, # path length
                        np.NaN, # path orientation
                        TASK, # task
                        EXP, # experiment
                        sub_id, # subject ID
                        sub_init, # subject initials
                        part_info[3], # DBIC ID
                        part_info[4] # Accession Number
                    ])
                    # if trial_type == 'fix':
# for run in range(n_runs):
#
# # select the initial eye for the first half of trials and the other eye for the other half
# run_eye = ("L" if run < n_runs / 2 else "R") if init_eye == "L" else ("R" if run < n_runs / 2 else "L")
#
# # randomize block
# run_hemi = "L" if run in
# rand_blocks = np.ones(n_blocks)
# rand_blocks[:n_blocks // 2] = 0
# np.random.shuffle(rand_blocks)
#
# for block in range(n_blocks):
#
# # select the random hemifield for this block
# block_side = hemifields[rand_blocks]
# Build one row per condition (hemifield x trial type x eye), matching the
# 16-entry `cols` layout defined above.
rows = None
for hemi, trial_type, eye in conds:
    row = np.array([
        hemi, # cued hemifield
        trial_type, # vertical control, oblique control, or dd
        np.NaN, # does the target dim
        np.NaN, # time of dimming
        np.NaN, # trial
        eye, # eye
        np.NaN, # block
        np.NaN, # run
        np.NaN, # path length
        np.NaN, # path orientation
        TASK, # task
        EXP, # experiment
        sub_id, # subject ID
        sub_init, # subject initials
        part_info[3], # DBIC ID
        part_info[4] # Accession Number
    ])
    if rows is None:
        rows = row
    else:
        rows = np.vstack((rows, row))
# repeat conditions for however many trials
this_block = np.repeat(rows, run_per_cond, axis=0)
print(this_block)
# shuffle them
np.random.shuffle(this_block) # this preserves the order within row and just shuffles the rows
# sanity check
# NOTE(review): with the current parameters this_block has
# len(conds) * run_per_cond rows, while n_trials = n_runs * n_blocks * len(conds);
# the assertion below appears to fail as written -- confirm the intended design.
# print(f"Shape of block {b}: {this_block.shape}")
assert this_block.shape == (n_trials, len(cols))
# add trial labels
# TRIAL is column index 4 in `cols` (and in the row layout built above);
# the original wrote the labels into index 6, which is the BLOCK column.
this_block[:, 4] = np.arange(1, n_trials + 1) # fill the TRIAL column with ordered trial numbers
if exp_blocks is None:
    exp_blocks = this_block
else:
    exp_blocks = np.vstack((exp_blocks, this_block))
# =========================================================================== #
# --------------------------------------------------------------------------- #
# -------------------------------- ! RUN ------------------------------------ #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# Initialize parameters
# gabor
gab_size = 1.2 # in dva
gab_shift = 7 # degrees shift in x from the center of the screen
gab_sf = .5 / gab_size # cycles per degree spatial frequency
# NOTE(review): `gabor` is not defined anywhere in this file -- only
# left_gabor and right_gabor exist (STIMULUS section), so these two lines
# raise a NameError at runtime; presumably leftovers from the single-gabor
# behavioural script. gab_shift also duplicates horiz_offset (= 7) above.
gabor.size = gab_size
gabor.sf = gab_sf
# runtime params
# Flag frames that overshoot one refresh period by more than 3 ms (used by
# the frame-interval recording saved at the end of the run).
exp_win.refreshThreshold = (1 / mon_specs["refresh_rate"]) + 0.003
path_dur = 1000 # milli-second
# Frames per drift sweep. Cast to a plain int: np.floor returns a float,
# and range(n_frames) in the trial loop rejects non-integers.
n_frames = int(np.floor(path_dur * int(mon_specs["refresh_rate"]) / 1000))
# clocks
exp_clock = core.Clock() # global experiment timer, reset just before trial 1
# show instructions and wait for keypress
msg_stim.text = instr_msg
msg_stim.draw()
exp_win.flip()
# Wait for the space bar before starting (cf. the "response button" wording
# in the instruction text -- confirm the intended mapping).
event.waitKeys(keyList=['space'])
logging.exp("===========================")
logging.exp("Experiment started")
logging.exp("===========================")
exp_win.flip()
exp_clock.reset()
# start trials
# Per-trial flow: read this trial's drift parameters, run two back-and-forth
# drift sweeps of the stimulus, then collect (or fake, in debug mode) the
# participant's orientation/length report.
# NOTE(review): this loop still references `gabor`, `resp_line` and
# `resp_stages`, none of which are defined in this file -- it looks like a
# leftover of the behavioural double-drift reporting task and needs to be
# adapted to the left/right gabor + checkerboard stimuli defined above.
for trial in range(n_trials):
    # log it
    logging.exp(f"---------------------------")
    logging.exp(f"Trial {trial} started.")
    # run parameters
    # from Hz to cycles/frame
    v_tex = [exp_blocks[trial, 1] / mon_specs["refresh_rate"], 0]
    v_env = [0, exp_blocks[trial, 2] / mon_specs["refresh_rate"]]
    # start recording frames
    exp_win.recordFrameIntervals = True
    # show fixation
    fix.autoDraw = True
    # log it
    logging.exp("Moving the stimulus.")
    # show the drift
    for rep in range(2):
        # control drawing every frame
        for frame in range(n_frames):
            # drift right / move up on odd repetitions
            if not rep:
                gabor.phase += v_tex
                gabor.pos += v_env
            # drift left / move down on even repetitions
            else:
                gabor.phase -= v_tex
                gabor.pos -= v_env
            gabor.draw()
            exp_win.flip()
    # Get the response
    # clean buffer
    event.clearEvents()
    # reporting device
    ans_mouse = event.Mouse(visible=False, win=exp_win)
    # initialize response line length and ori every trial
    resp_line.size = .1 if resp_stages[0] == 'Length' else 1 # starting line size with 1 dva
    resp_line.ori = 90
    # for debugging
    if not fake_ans:
        # text message
        rep_stim.autoDraw = True
        # reporting stages
        for stage in resp_stages:
            # get response
            resp = True
            # reporting loop
            while resp:
                # mouse wheel for controlling the line
                wheel_dX, wheel_dY = ans_mouse.getWheelRel()
                # change the orientation
                if stage == 'Orientation':
                    # text message
                    rep_stim.text = "Match orientation"
                    rep_stim.draw()
                    if 0 <= resp_line.ori < 180: # don't allow weird responses
                        resp_line.setOri(wheel_dY * 2, '-')
                    else:
                        resp_line.ori = 0
                # change the length
                else:
                    # text message
                    rep_stim.text = "Match length"
                    rep_stim.draw()
                    if resp_line.size > 0: # don't allow below 0 size!
                        resp_line.size += wheel_dY * .05
                    else:
                        resp_line.size = .1
                resp_line.draw()
                exp_win.flip()
                # get the keypress for end of reporting
                keys = event.getKeys()
                for key in keys:
                    # space is the end of reporting
                    if key == 'space':
                        # save the orientation
                        if stage == 'Orientation':
                            exp_blocks[trial, 4] = resp_line.ori
                            # NOTE(review): `resp` is only cleared in the
                            # 'Length' branch below, so a space press during
                            # the 'Orientation' stage saves the value but
                            # never exits the while loop -- confirm.
                        # save the length
                        else:
                            # sometimes .size returns an np array!
                            # NOTE(review): bare except -- narrow this to the
                            # exception np.round actually raises here so real
                            # errors are not swallowed.
                            try:
                                # the default response line size=1 means 2dva
                                exp_blocks[trial, 5] = np.round(resp_line.size, 2)
                            except:
                                exp_blocks[trial, 5] = np.round(resp_line.size[0], 2)
                            # log and end reporting
                            logging.exp(f"Response recorded: {exp_blocks[trial, 4]}")
                            resp = False # end reporting
                            rep_stim.autoDraw = False
                    # escape is quitting
                    elif key == 'escape': # quit button
                        logging.error("Aborted experiment.")
                        exp_win.close()
                        core.quit()
    # if in debugging mode
    else:
        # just generate fake responses
        exp_win.flip()
        core.wait(4) # approximate response time
        exp_blocks[trial, 4] = .33 # random orientation
        exp_blocks[trial, 5] = .69 # random size
    # Turn fixation off
    fix.autoDraw = False
    # clear buffer
    event.clearEvents()
    logging.exp(f"Trial ended.")
# =========================================================================== #
# --------------------------------------------------------------------------- #
# -------------------------------- ! WRAP UP -------------------------------- #
# --------------------------------------------------------------------------- #
# =========================================================================== #
# time it
t_end = np.round(exp_clock.getTime() / 60, 2) # total run duration in minutes
logging.exp(f"Experiment finished. Duration: {t_end} minutes.")
print(f"Experiment finished. Duration: {t_end} minutes.")
# show thank-you message
msg_stim.text = end_msg
msg_stim.draw()
exp_win.flip()
core.wait(2)
# schedule the final log entry for the next screen flip
exp_win.logOnFlip("Experiment ended.", level=logging.EXP)
exp_win.flip()
# save in csv
exp_df = pd.DataFrame(exp_blocks, columns=cols) # turn it into a pandas dataframe
exp_df.to_csv(exp_file + '.csv', sep=',', index=False)
# save recorded frames
exp_win.saveFrameIntervals(fileName=frames_file)
exp_win.close()
core.quit()
PROCEDURE -------------------------------- # # --------------------------------------------------------------------------- # # =========================================================================== # # Instructions # Conditions # target left or right side of the fixation # trial_types = ["dd", "ctrl_vert", "ctrl_oblq"] # is it a double- or single-drift # left eye viewing or right eye: first 5 runs are one eye and the last 5 are the other # each block has an initial 4s wait period followed by 11s of stimulus presentation and 15s fixation # number of runs # Data handler # columns of experiment dataframe # blocks and trials # actual dataframe # in the speed block, speeds are varied. In the duration block, durations are varied. # total number of trials in the blocks # loop through conditions, make every permutation, and save it to a numpy array # target hemifield # cue, stim, or fixation # does the target dim # time of dimming # trial # block # run # path length # path orientation # task # experiment # subject ID # subject initials # DBIC ID # Accession Number # if trial_type == 'fix': # for run in range(n_runs): # # # select the initial eye for the first half of trials and the other eye for the other half # run_eye = ("L" if run < n_runs / 2 else "R") if init_eye == "L" else ("R" if run < n_runs / 2 else "L") # # # randomize block # run_hemi = "L" if run in # rand_blocks = np.ones(n_blocks) # rand_blocks[:n_blocks // 2] = 0 # np.random.shuffle(rand_blocks) # # for block in range(n_blocks): # # # select the random hemifield for this block # block_side = hemifields[rand_blocks] # cued hemifield # vertical control, oblique control, or dd # does the target dim # time of dimming # trial # eye # block # run # path length # path orientation # task # experiment # subject ID # subject initials # DBIC ID # Accession Number # repeat conditions for however many trials # shuffle them # this preserves the order within row and just shuffles the rows # sanity check # 
print(f"Shape of block {b}: {this_block.shape}") # add trial labels # trial is the 7(-1)th col. set it to a list of ordered numbers # =========================================================================== # # --------------------------------------------------------------------------- # # -------------------------------- ! RUN ------------------------------------ # # --------------------------------------------------------------------------- # # =========================================================================== # # Initialize parameters # gabor # in dva # degrees shift in x from the center of the screen # cycles per degree spatial frequency # runtime params # milli-second # clocks # show instructions and wait for keypress # start trials # log it # run parameters # from Hz to cycles/frame # start recording frames # show fixation # log it # show the drift # control drawing every frame # drift right / move up on odd repetitions # drift left / move down on even repetitions # Get the response # clean buffer # reporting device # initialize response line length and ori every trial # starting line size with 1 dva # for debugging # text message # reporting stages # get response # reporting loop # mouse wheel for controlling the line # change the orientation # text message # don't allow weird responses # change the length # text message # don't allow below 0 size! # get the keypress for end of reporting # space is the end of reporting # save the orientation # save the length # sometimes .size returns an np array! 
# the default response line size=1 means 2dva # log and end reporting # end reporting # escape is quitting # quit button # if in debugging mode # just generate fake responses # approximate response time # random orientation # random size # Turn fixation off # clear buffer # =========================================================================== # # --------------------------------------------------------------------------- # # -------------------------------- ! WRAP UP -------------------------------- # # --------------------------------------------------------------------------- # # =========================================================================== # # time it # show than you message # save in csv # turn it into a pandas dataframe # save recorded frames | 2.099253 | 2 |
utils/generators/matrix_multiply.py | sgpthomas/diospyros | 27 | 6619967 | """
Matrix multiply generator. Generates an Eigen matrix multiply override kernel.
"""
def generator(kernel_name, params, spec_file):
input_rows = params["input_rows"]
input_cols = params["input_cols"]
output_cols = params["output_cols"]
# inject specification template with arguments
spec = """/*!
Specification file of the target kernel to be consumed by the Diosypros tool
*/
#define A_ROWS {}
#define A_COLS {}
#define B_COLS {}
void {}(
float a_in[A_ROWS * A_COLS],
float b_in[A_COLS * B_COLS],
float c_out[A_ROWS * B_COLS]) {{
for (int i = 0; i < A_ROWS; i++) {{
for (int j = 0; j < B_COLS; j++) {{
c_out[j * A_ROWS + i] = 0;
for (int k = 0; k < A_COLS; k++) {{
c_out[j * A_ROWS + i] += a_in[k * A_ROWS + i] * b_in[j * A_COLS + k];
}}
}}
}}
}}""".format(input_rows, input_cols, output_cols, kernel_name)
handle = open(spec_file, "w")
handle.write(spec)
handle.close()
# return a dictionary of the inputs and outputs of this function definition
# and type signatures
manifest_shard = {
"inputs": {
"a": "Eigen::Matrix<float, {}, {}>".format(input_rows, input_cols),
"b": "Eigen::Matrix<float, {}, {}>".format(input_cols, output_cols)
},
"outputs": {
"c": "Eigen::Matrix<float, {}, {}>".format(input_rows, output_cols)
},
"test": "c = a * b"
}
return manifest_shard
| """
Matrix multiply generator. Generates an Eigen matrix multiply override kernel.
"""
def generator(kernel_name, params, spec_file):
input_rows = params["input_rows"]
input_cols = params["input_cols"]
output_cols = params["output_cols"]
# inject specification template with arguments
spec = """/*!
Specification file of the target kernel to be consumed by the Diosypros tool
*/
#define A_ROWS {}
#define A_COLS {}
#define B_COLS {}
void {}(
float a_in[A_ROWS * A_COLS],
float b_in[A_COLS * B_COLS],
float c_out[A_ROWS * B_COLS]) {{
for (int i = 0; i < A_ROWS; i++) {{
for (int j = 0; j < B_COLS; j++) {{
c_out[j * A_ROWS + i] = 0;
for (int k = 0; k < A_COLS; k++) {{
c_out[j * A_ROWS + i] += a_in[k * A_ROWS + i] * b_in[j * A_COLS + k];
}}
}}
}}
}}""".format(input_rows, input_cols, output_cols, kernel_name)
handle = open(spec_file, "w")
handle.write(spec)
handle.close()
# return a dictionary of the inputs and outputs of this function definition
# and type signatures
manifest_shard = {
"inputs": {
"a": "Eigen::Matrix<float, {}, {}>".format(input_rows, input_cols),
"b": "Eigen::Matrix<float, {}, {}>".format(input_cols, output_cols)
},
"outputs": {
"c": "Eigen::Matrix<float, {}, {}>".format(input_rows, output_cols)
},
"test": "c = a * b"
}
return manifest_shard
| en | 0.367231 | Matrix multiply generator. Generates an Eigen matrix multiply override kernel. # inject specification template with arguments /*! Specification file of the target kernel to be consumed by the Diosypros tool */ #define A_ROWS {} #define A_COLS {} #define B_COLS {} void {}( float a_in[A_ROWS * A_COLS], float b_in[A_COLS * B_COLS], float c_out[A_ROWS * B_COLS]) {{ for (int i = 0; i < A_ROWS; i++) {{ for (int j = 0; j < B_COLS; j++) {{ c_out[j * A_ROWS + i] = 0; for (int k = 0; k < A_COLS; k++) {{ c_out[j * A_ROWS + i] += a_in[k * A_ROWS + i] * b_in[j * A_COLS + k]; }} }} }} }} # return a dictionary of the inputs and outputs of this function definition # and type signatures | 2.932542 | 3 |
src/kmk/handlers/stock.py | kbjunky/MacroPact | 61 | 6619968 | from kmk.kmktime import sleep_ms
def passthrough(key, state, *args, **kwargs):
return state
def default_pressed(key, state, KC, coord_int=None, coord_raw=None):
state.hid_pending = True
if coord_int is not None:
state.coord_keys_pressed[coord_int] = key
state.keys_pressed.add(key)
return state
def default_released(key, state, KC, coord_int=None, coord_raw=None):
state.hid_pending = True
state.keys_pressed.discard(key)
if coord_int is not None:
state.keys_pressed.discard(state.coord_keys_pressed.get(coord_int, None))
state.coord_keys_pressed[coord_int] = None
return state
def reset(*args, **kwargs):
try:
import machine
machine.reset()
except ImportError:
import microcontroller
microcontroller.reset()
def bootloader(*args, **kwargs):
try:
import machine
machine.bootloader()
except ImportError:
import microcontroller
microcontroller.on_next_reset(microcontroller.RunMode.BOOTLOADER)
microcontroller.reset()
def debug_pressed(key, state, KC, *args, **kwargs):
if state.config.debug_enabled:
print('DebugDisable()')
else:
print('DebugEnable()')
state.config.debug_enabled = not state.config.debug_enabled
return state
def gesc_pressed(key, state, KC, *args, **kwargs):
GESC_TRIGGERS = {KC.LSHIFT, KC.RSHIFT, KC.LGUI, KC.RGUI}
if GESC_TRIGGERS.intersection(state.keys_pressed):
# First, release GUI if already pressed
state.config._send_hid()
# if Shift is held, KC_GRAVE will become KC_TILDE on OS level
state.keys_pressed.add(KC.GRAVE)
state.hid_pending = True
return state
# else return KC_ESC
state.keys_pressed.add(KC.ESCAPE)
state.hid_pending = True
return state
def gesc_released(key, state, KC, *args, **kwargs):
state.keys_pressed.discard(KC.ESCAPE)
state.keys_pressed.discard(KC.GRAVE)
state.hid_pending = True
return state
def bkdl_pressed(key, state, KC, *args, **kwargs):
BKDL_TRIGGERS = {KC.LGUI, KC.RGUI}
if BKDL_TRIGGERS.intersection(state.keys_pressed):
state.config._send_hid()
state.keys_pressed.add(KC.DEL)
state.hid_pending = True
return state
# else return KC_ESC
state.keys_pressed.add(KC.BKSP)
state.hid_pending = True
return state
def bkdl_released(key, state, KC, *args, **kwargs):
state.keys_pressed.discard(KC.BKSP)
state.keys_pressed.discard(KC.DEL)
state.hid_pending = True
return state
def sleep_pressed(key, state, KC, *args, **kwargs):
sleep_ms(key.meta.ms)
return state
def uc_mode_pressed(key, state, *args, **kwargs):
state.config.unicode_mode = key.meta.mode
return state
def leader_pressed(key, state, *args, **kwargs):
return state._begin_leader_mode()
def td_pressed(key, state, *args, **kwargs):
return state._process_tap_dance(key, True)
def td_released(key, state, *args, **kwargs):
return state._process_tap_dance(key, False)
def rgb_tog(key, state, *args, **kwargs):
if state.config.pixels.animation_mode == 'static_standby':
state.config.pixels.animation_mode = 'static'
state.config.pixels.enabled = not state.config.pixels.enabled
return state
def rgb_hui(key, state, *args, **kwargs):
state.config.pixels.increase_hue()
return state
def rgb_hud(key, state, *args, **kwargs):
state.config.pixels.decrease_hue()
return state
def rgb_sai(key, state, *args, **kwargs):
state.config.pixels.increase_sat()
return state
def rgb_sad(key, state, *args, **kwargs):
state.config.pixels.decrease_sat()
return state
def rgb_vai(key, state, *args, **kwargs):
state.config.pixels.increase_val()
return state
def rgb_vad(key, state, *args, **kwargs):
state.config.pixels.decrease_val()
return state
def rgb_ani(key, state, *args, **kwargs):
state.config.pixels.increase_ani()
return state
def rgb_and(key, state, *args, **kwargs):
state.config.pixels.decrease_ani()
return state
def rgb_mode_static(key, state, *args, **kwargs):
state.config.pixels.effect_init = True
state.config.pixels.animation_mode = 'static'
return state
def rgb_mode_breathe(key, state, *args, **kwargs):
state.config.pixels.effect_init = True
state.config.pixels.animation_mode = 'breathing'
return state
def rgb_mode_breathe_rainbow(key, state, *args, **kwargs):
state.config.pixels.effect_init = True
state.config.pixels.animation_mode = 'breathing_rainbow'
return state
def rgb_mode_rainbow(key, state, *args, **kwargs):
state.config.pixels.effect_init = True
state.config.pixels.animation_mode = 'rainbow'
return state
def rgb_mode_swirl(key, state, *args, **kwargs):
state.config.pixels.effect_init = True
state.config.pixels.animation_mode = 'swirl'
return state
def rgb_mode_knight(key, state, *args, **kwargs):
state.config.pixels.effect_init = True
state.config.pixels.animation_mode = 'knight'
return state
def led_tog(key, state, *args, **kwargs):
if state.config.led.animation_mode == 'static_standby':
state.config.led.animation_mode = 'static'
state.config.led.enabled = not state.config.led.enabled
return state
def led_inc(key, state, *args, **kwargs):
state.config.led.increase_brightness()
return state
def led_dec(key, state, *args, **kwargs):
state.config.led.decrease_brightness()
return state
def led_ani(key, state, *args, **kwargs):
state.config.led.increase_ani()
return state
def led_and(key, state, *args, **kwargs):
state.config.led.decrease_ani()
return state
def led_mode_static(key, state, *args, **kwargs):
state.config.led.effect_init = True
state.config.led.animation_mode = 'static'
return state
def led_mode_breathe(key, state, *args, **kwargs):
state.config.led.effect_init = True
state.config.led.animation_mode = 'breathing'
return state
def bt_clear_bonds(key, state, *args, **kwargs):
state.config._hid_helper_inst.clear_bonds()
return state
def bt_next_conn(key, state, *args, **kwargs):
state.config._hid_helper_inst.next_connection()
return state
def bt_prev_conn(key, state, *args, **kwargs):
state.config._hid_helper_inst.previous_connection()
return state
| from kmk.kmktime import sleep_ms
def passthrough(key, state, *args, **kwargs):
return state
def default_pressed(key, state, KC, coord_int=None, coord_raw=None):
state.hid_pending = True
if coord_int is not None:
state.coord_keys_pressed[coord_int] = key
state.keys_pressed.add(key)
return state
def default_released(key, state, KC, coord_int=None, coord_raw=None):
state.hid_pending = True
state.keys_pressed.discard(key)
if coord_int is not None:
state.keys_pressed.discard(state.coord_keys_pressed.get(coord_int, None))
state.coord_keys_pressed[coord_int] = None
return state
def reset(*args, **kwargs):
try:
import machine
machine.reset()
except ImportError:
import microcontroller
microcontroller.reset()
def bootloader(*args, **kwargs):
try:
import machine
machine.bootloader()
except ImportError:
import microcontroller
microcontroller.on_next_reset(microcontroller.RunMode.BOOTLOADER)
microcontroller.reset()
def debug_pressed(key, state, KC, *args, **kwargs):
if state.config.debug_enabled:
print('DebugDisable()')
else:
print('DebugEnable()')
state.config.debug_enabled = not state.config.debug_enabled
return state
def gesc_pressed(key, state, KC, *args, **kwargs):
GESC_TRIGGERS = {KC.LSHIFT, KC.RSHIFT, KC.LGUI, KC.RGUI}
if GESC_TRIGGERS.intersection(state.keys_pressed):
# First, release GUI if already pressed
state.config._send_hid()
# if Shift is held, KC_GRAVE will become KC_TILDE on OS level
state.keys_pressed.add(KC.GRAVE)
state.hid_pending = True
return state
# else return KC_ESC
state.keys_pressed.add(KC.ESCAPE)
state.hid_pending = True
return state
def gesc_released(key, state, KC, *args, **kwargs):
state.keys_pressed.discard(KC.ESCAPE)
state.keys_pressed.discard(KC.GRAVE)
state.hid_pending = True
return state
def bkdl_pressed(key, state, KC, *args, **kwargs):
BKDL_TRIGGERS = {KC.LGUI, KC.RGUI}
if BKDL_TRIGGERS.intersection(state.keys_pressed):
state.config._send_hid()
state.keys_pressed.add(KC.DEL)
state.hid_pending = True
return state
# else return KC_ESC
state.keys_pressed.add(KC.BKSP)
state.hid_pending = True
return state
def bkdl_released(key, state, KC, *args, **kwargs):
state.keys_pressed.discard(KC.BKSP)
state.keys_pressed.discard(KC.DEL)
state.hid_pending = True
return state
def sleep_pressed(key, state, KC, *args, **kwargs):
sleep_ms(key.meta.ms)
return state
def uc_mode_pressed(key, state, *args, **kwargs):
state.config.unicode_mode = key.meta.mode
return state
def leader_pressed(key, state, *args, **kwargs):
return state._begin_leader_mode()
def td_pressed(key, state, *args, **kwargs):
return state._process_tap_dance(key, True)
def td_released(key, state, *args, **kwargs):
return state._process_tap_dance(key, False)
def rgb_tog(key, state, *args, **kwargs):
    # Toggle the RGB pixels on/off; leave standby first so the toggle
    # is visible immediately.
    if state.config.pixels.animation_mode == 'static_standby':
        state.config.pixels.animation_mode = 'static'
    state.config.pixels.enabled = not state.config.pixels.enabled
    return state
def rgb_hui(key, state, *args, **kwargs):
    # RGB hue up.
    state.config.pixels.increase_hue()
    return state
def rgb_hud(key, state, *args, **kwargs):
    # RGB hue down.
    state.config.pixels.decrease_hue()
    return state
def rgb_sai(key, state, *args, **kwargs):
    # RGB saturation up.
    state.config.pixels.increase_sat()
    return state
def rgb_sad(key, state, *args, **kwargs):
    # RGB saturation down.
    state.config.pixels.decrease_sat()
    return state
def rgb_vai(key, state, *args, **kwargs):
    # RGB value (brightness) up.
    state.config.pixels.increase_val()
    return state
def rgb_vad(key, state, *args, **kwargs):
    # RGB value (brightness) down.
    state.config.pixels.decrease_val()
    return state
def rgb_ani(key, state, *args, **kwargs):
    # RGB animation speed up.
    state.config.pixels.increase_ani()
    return state
def rgb_and(key, state, *args, **kwargs):
    # RGB animation speed down.
    state.config.pixels.decrease_ani()
    return state
def rgb_mode_static(key, state, *args, **kwargs):
    # Select the 'static' RGB animation; effect_init requests a re-init
    # of the effect on the next animation tick.
    state.config.pixels.effect_init = True
    state.config.pixels.animation_mode = 'static'
    return state
def rgb_mode_breathe(key, state, *args, **kwargs):
    # Select the 'breathing' RGB animation.
    state.config.pixels.effect_init = True
    state.config.pixels.animation_mode = 'breathing'
    return state
def rgb_mode_breathe_rainbow(key, state, *args, **kwargs):
    # Select the 'breathing_rainbow' RGB animation.
    state.config.pixels.effect_init = True
    state.config.pixels.animation_mode = 'breathing_rainbow'
    return state
def rgb_mode_rainbow(key, state, *args, **kwargs):
    # Select the 'rainbow' RGB animation.
    state.config.pixels.effect_init = True
    state.config.pixels.animation_mode = 'rainbow'
    return state
def rgb_mode_swirl(key, state, *args, **kwargs):
    # Select the 'swirl' RGB animation.
    state.config.pixels.effect_init = True
    state.config.pixels.animation_mode = 'swirl'
    return state
def rgb_mode_knight(key, state, *args, **kwargs):
    # Select the 'knight' (Knight Rider style) RGB animation.
    state.config.pixels.effect_init = True
    state.config.pixels.animation_mode = 'knight'
    return state
def led_tog(key, state, *args, **kwargs):
    # Toggle the backlight LEDs on/off; leave standby first so the toggle
    # is visible immediately.
    if state.config.led.animation_mode == 'static_standby':
        state.config.led.animation_mode = 'static'
    state.config.led.enabled = not state.config.led.enabled
    return state
def led_inc(key, state, *args, **kwargs):
    # LED brightness up.
    state.config.led.increase_brightness()
    return state
def led_dec(key, state, *args, **kwargs):
    # LED brightness down.
    state.config.led.decrease_brightness()
    return state
def led_ani(key, state, *args, **kwargs):
    # LED animation speed up.
    state.config.led.increase_ani()
    return state
def led_and(key, state, *args, **kwargs):
    # LED animation speed down.
    state.config.led.decrease_ani()
    return state
def led_mode_static(key, state, *args, **kwargs):
    # Select the 'static' LED animation; effect_init requests a re-init.
    state.config.led.effect_init = True
    state.config.led.animation_mode = 'static'
    return state
def led_mode_breathe(key, state, *args, **kwargs):
    # Select the 'breathing' LED animation.
    state.config.led.effect_init = True
    state.config.led.animation_mode = 'breathing'
    return state
def bt_clear_bonds(key, state, *args, **kwargs):
    # Forget all stored Bluetooth pairings.
    state.config._hid_helper_inst.clear_bonds()
    return state
def bt_next_conn(key, state, *args, **kwargs):
    # Cycle to the next stored Bluetooth connection.
    state.config._hid_helper_inst.next_connection()
    return state
def bt_prev_conn(key, state, *args, **kwargs):
    # Cycle to the previous stored Bluetooth connection.
    state.config._hid_helper_inst.previous_connection()
    return state
| en | 0.77343 | # First, release GUI if already pressed # if Shift is held, KC_GRAVE will become KC_TILDE on OS level # else return KC_ESC # else return KC_ESC | 2.364522 | 2 |
yatube/posts/views.py | LHLHLHE/yatube_project | 0 | 6619969 | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.cache import cache_page
from .forms import CommentForm, PostForm
from .models import Follow, Group, Post, User
from .settings import POSTS_ON_PAGE
# Template paths used by the view functions below.
INDEX_HTML = 'posts/index.html'
GROUP_HTML = 'posts/group_list.html'
PROFILE_HTML = 'posts/profile.html'
DETAIL_HTML = 'posts/post_detail.html'
CREATE_HTML = 'posts/create_post.html'
FOLLOW_INDEX_HTML = 'posts/follow.html'
def page_obj(request, model):
    """Return one page of *model*'s objects for this request.

    *model* is a manager/related manager; the page number comes from the
    ``page`` GET parameter (``get_page`` tolerates missing/invalid values).
    """
    paginator = Paginator(model.all(), POSTS_ON_PAGE)
    return paginator.get_page(request.GET.get('page'))
@cache_page(20, key_prefix='index_page')
def index(request):
    """Render the main page with the latest posts (response cached 20 s)."""
    context = {
        'page_obj': page_obj(request, Post.objects),
        'index': True,
    }
    return render(request, INDEX_HTML, context)
def group_posts(request, slug):
    """Render all posts belonging to the group identified by *slug*."""
    group = get_object_or_404(Group, slug=slug)
    context = {
        'group': group,
        'page_obj': page_obj(request, group.posts),
    }
    return render(request, GROUP_HTML, context)
def profile(request, username):
    """Render *username*'s profile page with their posts and follow status."""
    author = get_object_or_404(User, username=username)
    # The follow flag only matters for an authenticated visitor looking at
    # somebody else's profile.
    following = (
        request.user.is_authenticated
        and request.user != author
        and Follow.objects.filter(user=request.user, author=author).exists()
    )
    context = {
        'page_obj': page_obj(request, author.posts),
        'author': author,
        'following': following,
    }
    return render(request, PROFILE_HTML, context)
def post_detail(request, post_id):
    """Render a single post together with its comment form."""
    post = get_object_or_404(Post, id=post_id)
    form = CommentForm(request.POST or None)
    return render(request, DETAIL_HTML, {'post': post, 'form': form})
@login_required
def post_create(request):
    """Show the new-post form; on a valid POST create it and redirect."""
    form = PostForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.instance.author = request.user
        form.save()
        return redirect('posts:profile', username=request.user)
    return render(request, CREATE_HTML, {'form': form})
@login_required
def post_edit(request, post_id):
    """Let the author edit an existing post; anyone else is bounced back."""
    post = get_object_or_404(Post, id=post_id)
    if post.author != request.user:
        return redirect('posts:post_detail', post_id=post_id)
    form = PostForm(
        request.POST or None,
        request.FILES or None,
        instance=post,
    )
    if form.is_valid():
        form.save()
        return redirect('posts:post_detail', post_id=post_id)
    return render(request, CREATE_HTML, {'form': form, 'is_edit': True})
@login_required
def add_comment(request, post_id):
    """Attach a comment from the current user to post *post_id*."""
    post = get_object_or_404(Post, id=post_id)
    form = CommentForm(request.POST or None)
    if form.is_valid():
        form.instance.author = request.user
        form.instance.post = post
        form.save()
    return redirect('posts:post_detail', post_id=post_id)
@login_required
def follow_index(request):
    """Render the feed of posts written by authors the user follows."""
    feed = Post.objects.filter(author__following__user=request.user)
    return render(request, FOLLOW_INDEX_HTML, {
        'page_obj': page_obj(request, feed),
    })
@login_required
def profile_follow(request, username):
    """Subscribe the current user to *username* (self-follows are ignored)."""
    target = get_object_or_404(User, username=username)
    already_following = Follow.objects.filter(
        user=request.user, author=target).exists()
    if request.user != target and not already_following:
        Follow.objects.create(user=request.user, author=target)
    return redirect('posts:profile', username=username)
@login_required
def profile_unfollow(request, username):
    """Unsubscribe the current user from *username*; 404 if not following."""
    get_object_or_404(
        Follow,
        user=request.user,
        author__username=username,
    ).delete()
    return redirect('posts:profile', username=username)
| from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.cache import cache_page
from .forms import CommentForm, PostForm
from .models import Follow, Group, Post, User
from .settings import POSTS_ON_PAGE
# Template paths used by the view functions below.
INDEX_HTML = 'posts/index.html'
GROUP_HTML = 'posts/group_list.html'
PROFILE_HTML = 'posts/profile.html'
DETAIL_HTML = 'posts/post_detail.html'
CREATE_HTML = 'posts/create_post.html'
FOLLOW_INDEX_HTML = 'posts/follow.html'
def page_obj(request, model):
    """Return one page of *model*'s objects for this request.

    *model* is a manager/related manager; the page number comes from the
    ``page`` GET parameter (``get_page`` tolerates missing/invalid values).
    """
    paginator = Paginator(model.all(), POSTS_ON_PAGE)
    return paginator.get_page(request.GET.get('page'))
@cache_page(20, key_prefix='index_page')
def index(request):
    """Render the main page with the latest posts (response cached 20 s)."""
    context = {
        'page_obj': page_obj(request, Post.objects),
        'index': True,
    }
    return render(request, INDEX_HTML, context)
def group_posts(request, slug):
    """Render all posts belonging to the group identified by *slug*."""
    group = get_object_or_404(Group, slug=slug)
    context = {
        'group': group,
        'page_obj': page_obj(request, group.posts),
    }
    return render(request, GROUP_HTML, context)
def profile(request, username):
    """Render *username*'s profile page with their posts and follow status."""
    author = get_object_or_404(User, username=username)
    # The follow flag only matters for an authenticated visitor looking at
    # somebody else's profile.
    following = (
        request.user.is_authenticated
        and request.user != author
        and Follow.objects.filter(user=request.user, author=author).exists()
    )
    context = {
        'page_obj': page_obj(request, author.posts),
        'author': author,
        'following': following,
    }
    return render(request, PROFILE_HTML, context)
def post_detail(request, post_id):
    """Render a single post together with its comment form."""
    post = get_object_or_404(Post, id=post_id)
    form = CommentForm(request.POST or None)
    return render(request, DETAIL_HTML, {'post': post, 'form': form})
@login_required
def post_create(request):
    """Show the new-post form; on a valid POST create it and redirect."""
    form = PostForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.instance.author = request.user
        form.save()
        return redirect('posts:profile', username=request.user)
    return render(request, CREATE_HTML, {'form': form})
@login_required
def post_edit(request, post_id):
    """Let the author edit an existing post; anyone else is bounced back."""
    post = get_object_or_404(Post, id=post_id)
    if post.author != request.user:
        return redirect('posts:post_detail', post_id=post_id)
    form = PostForm(
        request.POST or None,
        request.FILES or None,
        instance=post,
    )
    if form.is_valid():
        form.save()
        return redirect('posts:post_detail', post_id=post_id)
    return render(request, CREATE_HTML, {'form': form, 'is_edit': True})
@login_required
def add_comment(request, post_id):
    """Attach a comment from the current user to post *post_id*."""
    post = get_object_or_404(Post, id=post_id)
    form = CommentForm(request.POST or None)
    if form.is_valid():
        form.instance.author = request.user
        form.instance.post = post
        form.save()
    return redirect('posts:post_detail', post_id=post_id)
@login_required
def follow_index(request):
    """Render the feed of posts written by authors the user follows."""
    feed = Post.objects.filter(author__following__user=request.user)
    return render(request, FOLLOW_INDEX_HTML, {
        'page_obj': page_obj(request, feed),
    })
@login_required
def profile_follow(request, username):
    """Subscribe the current user to *username* (self-follows are ignored)."""
    target = get_object_or_404(User, username=username)
    already_following = Follow.objects.filter(
        user=request.user, author=target).exists()
    if request.user != target and not already_following:
        Follow.objects.create(user=request.user, author=target)
    return redirect('posts:profile', username=username)
@login_required
def profile_unfollow(request, username):
    """Unsubscribe the current user from *username*; 404 if not following."""
    get_object_or_404(
        Follow,
        user=request.user,
        author__username=username,
    ).delete()
    return redirect('posts:profile', username=username)
| none | 1 | 2.044005 | 2 | |
week05/example4-2.py | cbchoi/sit32006 | 0 | 6619970 | from wtforms import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask import Flask
app = Flask(__name__)
# WTForms needs a secret key for CSRF protection; replace in production.
app.config['SECRET_KEY'] = 'hard to guess string'
class NameForm(Form):
    # Minimal one-field form used by this example.
    name = StringField('What is your name?', validators=[Required()])
    submit = SubmitField('Submit')
if __name__ == '__main__':
    # host='0.0.0.0' makes the dev server reachable from other machines.
    app.run(debug=True, host='0.0.0.0')
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask import Flask
app = Flask(__name__)
# WTForms needs a secret key for CSRF protection; replace in production.
app.config['SECRET_KEY'] = 'hard to guess string'
class NameForm(Form):
    # Minimal one-field form used by this example.
    name = StringField('What is your name?', validators=[Required()])
    submit = SubmitField('Submit')
if __name__ == '__main__':
    # host='0.0.0.0' makes the dev server reachable from other machines.
    app.run(debug=True, host='0.0.0.0')
utils.py | goncamateus/pytorch-soft-actor-critic | 0 | 6619971 | import math
import numpy as np
import torch
def create_log_gaussian(mean, log_std, t):
    """Log-density of a diagonal Gaussian N(mean, exp(log_std)**2) at *t*.

    The last dimension is the event dimension and is summed out, so the
    result has one fewer dimension than the inputs.

    BUG FIX: the quadratic term used to be ``-((0.5 * z) ** 2)`` which is
    ``-0.25 * z**2`` -- the 0.5 was squared together with the standardized
    residual. The correct Gaussian exponent is ``-0.5 * z**2``.
    """
    z = (t - mean) / log_std.exp()  # standardized residual
    quadratic = -0.5 * z.pow(2)
    event_dim = mean.shape[-1]
    # log normalizer: sum(log_std) + (d / 2) * log(2 * pi)
    log_norm = log_std.sum(dim=-1) + 0.5 * event_dim * math.log(2 * math.pi)
    return quadratic.sum(dim=-1) - log_norm
def logsumexp(inputs, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(inputs))) along *dim*.

    With ``dim=None`` the tensor is flattened first and the reduction is
    performed over the single remaining dimension.
    """
    if dim is None:
        inputs = inputs.view(-1)
        dim = 0
    max_val, _ = torch.max(inputs, dim=dim, keepdim=True)
    # Subtract the max before exponentiating to avoid overflow.
    result = max_val + (inputs - max_val).exp().sum(dim=dim, keepdim=True).log()
    return result if keepdim else result.squeeze(dim)
def soft_update(target, source, tau):
    """Polyak update: target <- (1 - tau) * target + tau * source, in place."""
    for tgt, src in zip(target.parameters(), source.parameters()):
        blended = tgt.data * (1.0 - tau) + src.data * tau
        tgt.data.copy_(blended)
def hard_update(target, source):
    """Copy every parameter of *source* into *target*, in place."""
    for tgt, src in zip(target.parameters(), source.parameters()):
        tgt.data.copy_(src.data)
def get_her_goal(env):
    # HER goal: the bot's longitudinal position on the track plus the first
    # component of the track error (assumes env exposes both -- TODO confirm).
    return np.array([env.position_on_track, env.get_track_err()[0]])
def get_goal(env):
    # Goal: the next checkpoint with a desired heading of 0.0 degrees.
    chkpt = env.track.checkpoints[env.track.next_checkpoint_idx]
    # NOTE(review): the following four lines are dead code -- the computed
    # desired angle `des_ang` is never used. Possibly the 0.0 in the return
    # was meant to be des_ang; confirm intent before cleaning up.
    nearest_point = env.track.nearest_point(env.bot_pos())
    idx = np.where(env.track.x == nearest_point[0])[0][0]
    des_ang = env.track.angle_at_index(idx)
    des_ang = np.degrees(des_ang)
    return np.array([chkpt, 0.0])
| import math
import numpy as np
import torch
def create_log_gaussian(mean, log_std, t):
    """Log-density of a diagonal Gaussian N(mean, exp(log_std)**2) at *t*.

    The last dimension is the event dimension and is summed out, so the
    result has one fewer dimension than the inputs.

    BUG FIX: the quadratic term used to be ``-((0.5 * z) ** 2)`` which is
    ``-0.25 * z**2`` -- the 0.5 was squared together with the standardized
    residual. The correct Gaussian exponent is ``-0.5 * z**2``.
    """
    z = (t - mean) / log_std.exp()  # standardized residual
    quadratic = -0.5 * z.pow(2)
    event_dim = mean.shape[-1]
    # log normalizer: sum(log_std) + (d / 2) * log(2 * pi)
    log_norm = log_std.sum(dim=-1) + 0.5 * event_dim * math.log(2 * math.pi)
    return quadratic.sum(dim=-1) - log_norm
def logsumexp(inputs, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(inputs))) along *dim*.

    With ``dim=None`` the tensor is flattened first and the reduction is
    performed over the single remaining dimension.
    """
    if dim is None:
        inputs = inputs.view(-1)
        dim = 0
    max_val, _ = torch.max(inputs, dim=dim, keepdim=True)
    # Subtract the max before exponentiating to avoid overflow.
    result = max_val + (inputs - max_val).exp().sum(dim=dim, keepdim=True).log()
    return result if keepdim else result.squeeze(dim)
def soft_update(target, source, tau):
    """Polyak update: target <- (1 - tau) * target + tau * source, in place."""
    for tgt, src in zip(target.parameters(), source.parameters()):
        blended = tgt.data * (1.0 - tau) + src.data * tau
        tgt.data.copy_(blended)
def hard_update(target, source):
    """Copy every parameter of *source* into *target*, in place."""
    for tgt, src in zip(target.parameters(), source.parameters()):
        tgt.data.copy_(src.data)
def get_her_goal(env):
    # HER goal: the bot's longitudinal position on the track plus the first
    # component of the track error (assumes env exposes both -- TODO confirm).
    return np.array([env.position_on_track, env.get_track_err()[0]])
def get_goal(env):
    # Goal: the next checkpoint with a desired heading of 0.0 degrees.
    chkpt = env.track.checkpoints[env.track.next_checkpoint_idx]
    # NOTE(review): the following four lines are dead code -- the computed
    # desired angle `des_ang` is never used. Possibly the 0.0 in the return
    # was meant to be des_ang; confirm intent before cleaning up.
    nearest_point = env.track.nearest_point(env.bot_pos())
    idx = np.where(env.track.x == nearest_point[0])[0][0]
    des_ang = env.track.angle_at_index(idx)
    des_ang = np.degrees(des_ang)
    return np.array([chkpt, 0.0])
| none | 1 | 2.139482 | 2 | |
test_utilities/src/d1_test/mock_api/echo_credentials.py | DataONEorg/d1_python | 15 | 6619972 | <reponame>DataONEorg/d1_python
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock: CNDiagnostic.echoCredentials() → SubjectInfo.
https://releases.dataone.org/online/api-documentation-v2.0/apis/CN_APIs.html
#CNDiagnostic.echoCredentials
Not available in v1.
A DataONEException can be triggered by adding a custom header. See d1_exception.py
"""
import logging
import re
import responses
import d1_common.const
import d1_common.url
import d1_test.mock_api.d1_exception
import d1_test.test_files
# Config
N_TOTAL = 100  # NOTE(review): appears unused in this module -- confirm.
ECHO_CREDENTIALS_ENDPOINT_RX = r"v([23])/diag/subject"  # matches API v2 and v3 paths
def add_callback(base_url):
    """Register the mock echoCredentials endpoint with the `responses` library.

    Any GET on <base_url>/v2|v3/diag/subject is routed to _request_callback.
    """
    responses.add_callback(
        responses.GET,
        re.compile(
            r"^"
            + d1_common.url.joinPathElements(base_url, ECHO_CREDENTIALS_ENDPOINT_RX)
        ),
        callback=_request_callback,
        content_type="",
    )
def _request_callback(request):
    """Build the mock response tuple (status, headers, body).

    Returns a DataONEException response when the trigger header is present,
    otherwise 200 with a canned SubjectInfo XML document.
    """
    logging.debug('Received callback. url="{}"'.format(request.url))
    # Return DataONEException if triggered
    exc_response_tup = d1_test.mock_api.d1_exception.trigger_by_header(request)
    if exc_response_tup:
        return exc_response_tup
    header_dict = {"Content-Type": d1_common.const.CONTENT_TYPE_XML}
    return (
        200,
        header_dict,
        d1_test.test_files.load_bin("xml/subject_info_only_person_records_1.xml"),
    )
| #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock: CNDiagnostic.echoCredentials() → SubjectInfo.
https://releases.dataone.org/online/api-documentation-v2.0/apis/CN_APIs.html
#CNDiagnostic.echoCredentials
Not available in v1.
A DataONEException can be triggered by adding a custom header. See d1_exception.py
"""
import logging
import re
import responses
import d1_common.const
import d1_common.url
import d1_test.mock_api.d1_exception
import d1_test.test_files
# Config
N_TOTAL = 100  # NOTE(review): appears unused in this module -- confirm.
ECHO_CREDENTIALS_ENDPOINT_RX = r"v([23])/diag/subject"  # matches API v2 and v3 paths
def add_callback(base_url):
    """Register the mock echoCredentials endpoint with the `responses` library.

    Any GET on <base_url>/v2|v3/diag/subject is routed to _request_callback.
    """
    responses.add_callback(
        responses.GET,
        re.compile(
            r"^"
            + d1_common.url.joinPathElements(base_url, ECHO_CREDENTIALS_ENDPOINT_RX)
        ),
        callback=_request_callback,
        content_type="",
    )
def _request_callback(request):
    """Build the mock response tuple (status, headers, body).

    Returns a DataONEException response when the trigger header is present,
    otherwise 200 with a canned SubjectInfo XML document.
    """
    logging.debug('Received callback. url="{}"'.format(request.url))
    # Return DataONEException if triggered
    exc_response_tup = d1_test.mock_api.d1_exception.trigger_by_header(request)
    if exc_response_tup:
        return exc_response_tup
    header_dict = {"Content-Type": d1_common.const.CONTENT_TYPE_XML}
    return (
        200,
        header_dict,
        d1_test.test_files.load_bin("xml/subject_info_only_person_records_1.xml"),
    )
) | en | 0.787499 | #!/usr/bin/env python # This work was created by participants in the DataONE project, and is # jointly copyrighted by participating institutions in DataONE. For # more information on DataONE, see our web site at http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Mock: CNDiagnostic.echoCredentials() → SubjectInfo. https://releases.dataone.org/online/api-documentation-v2.0/apis/CN_APIs.html #CNDiagnostic.echoCredentials Not available in v1. A DataONEException can be triggered by adding a custom header. See d1_exception.py # Config # Return DataONEException if triggered | 2.091758 | 2 |
SOLID-Principles/blackjack_notSOLID.py | j-hmd/daily-python | 0 | 6619973 | # Implementation of card and shoe for a Black Jack game
# Goal is to analyze the SOLID principles:
# Single Responsibility, Open/Close, Liskov Substitution, Interface Segregation , Dependency Inversion
# Code inspired in Steven Lott's course.
import random
"""
This class has some problems with it,
it both implements a deck of cards, and the cards themselves
It also implements the point counting, which is particular to BlackJack, so
we wouldn't be able to reuse this class if we wanted to.
"""
class Card():
    """A freshly shuffled 52-card deck with blackjack point values.

    NOTE(review): this class mixes three responsibilities (card, deck and
    blackjack scoring); kept together to preserve the existing interface.
    """
    def __init__(self):
        # One (rank, suit) tuple per card: ranks 1..13, suits H/S/C/D.
        deck = [(rank, suit)
                for rank in range(1, 14)
                for suit in 'HSCD']
        random.shuffle(deck)
        self.cards = deck
    def deal(self):
        """Remove and return the top card of the deck."""
        return self.cards.pop()
    def points(self, card):
        """Return the (low, high) blackjack values of *card*."""
        rank, _suit = card
        if rank == 1:
            return (1, 11)  # ace is soft: worth 1 or 11
        if 2 <= rank < 11:
            return (rank, rank)  # pip cards score face value
        return (10, 10)  # J, Q, K all score 10
class Shoe(Card):
    """A casino shoe built from *n* shuffled copies of the base deck.

    BUG FIX: ``deal`` was defined without ``self``, so calling
    ``shoe.deal()`` raised TypeError; it now correctly pops from the shoe.
    """
    def __init__(self, n):
        super().__init__()
        self.shoe = []
        # n copies of the base 52-card deck, then one big shuffle.
        for _ in range(n):
            self.shoe.extend(self.cards)
        random.shuffle(self.shoe)
    def shuffle_burn(self, n=100):
        """Reshuffle the shoe and burn (discard) the first *n* cards."""
        random.shuffle(self.shoe)
        self.shoe = self.shoe[n:]
    def deal(self):
        """Remove and return the next card from the shoe."""
        return self.shoe.pop()
"""
Articulating the problems:
- Mixed responsibilities: card, deck, points
- Missing responsibilities: total points to the black jack game?
- Limit reuse: can't use for cribbage for example
- Not substitutable: can't use a shoe in the place of a card
- Haphazard interface: (?) should be iterable.
Goals for our solid design: prevent problems, and differentiate changes that would be relevant.
"""
| # Implementation of card and shoe for a Black Jack game
# Goal is to analyze the SOLID principles:
# Single Responsibility, Open/Close, Liskov Substitution, Interface Segregation , Dependency Inversion
# Code inspired in Steven Lott's course.
import random
"""
This class has some problems with it,
it both implements a deck of cards, and the cards themselves
It also implements the point counting, which is particular to BlackJack, so
we wouldn't be able to reuse this class if we wanted to.
"""
class Card():
    """A freshly shuffled 52-card deck with blackjack point values.

    NOTE(review): this class mixes three responsibilities (card, deck and
    blackjack scoring); kept together to preserve the existing interface.
    """
    def __init__(self):
        # One (rank, suit) tuple per card: ranks 1..13, suits H/S/C/D.
        deck = [(rank, suit)
                for rank in range(1, 14)
                for suit in 'HSCD']
        random.shuffle(deck)
        self.cards = deck
    def deal(self):
        """Remove and return the top card of the deck."""
        return self.cards.pop()
    def points(self, card):
        """Return the (low, high) blackjack values of *card*."""
        rank, _suit = card
        if rank == 1:
            return (1, 11)  # ace is soft: worth 1 or 11
        if 2 <= rank < 11:
            return (rank, rank)  # pip cards score face value
        return (10, 10)  # J, Q, K all score 10
class Shoe(Card):
    """A casino shoe built from *n* shuffled copies of the base deck.

    BUG FIX: ``deal`` was defined without ``self``, so calling
    ``shoe.deal()`` raised TypeError; it now correctly pops from the shoe.
    """
    def __init__(self, n):
        super().__init__()
        self.shoe = []
        # n copies of the base 52-card deck, then one big shuffle.
        for _ in range(n):
            self.shoe.extend(self.cards)
        random.shuffle(self.shoe)
    def shuffle_burn(self, n=100):
        """Reshuffle the shoe and burn (discard) the first *n* cards."""
        random.shuffle(self.shoe)
        self.shoe = self.shoe[n:]
    def deal(self):
        """Remove and return the next card from the shoe."""
        return self.shoe.pop()
"""
Articulating the problems:
- Mixed responsibilities: card, deck, points
- Missing responsibilities: total points to the black jack game?
- Limit reuse: can't use for cribbage for example
- Not substitutable: can't use a shoe in the place of a card
- Haphazard interface: (?) should be iterable.
Goals for our solid design: prevent problems, and differentiate changes that would be relevant.
"""
| en | 0.914766 | # Implementation of card and shoe for a Black Jack game # Goal is to analyze the SOLID principles: # Single Responsibility, Open/Close, Liskov Substitution, Interface Segregation , Dependency Inversion # Code inspired in Steven Lott's course. This class has some problems with it, it both implements a deck of cards, and the cards themselves It also implements the point counting, which is particular to BlackJack, so we wouldn't be able to reuse this class if we wanted to. # A list of tuples, which represents a single card # range doesn't include the last number! # Hearts, Spades, Clubs, Diamonds Articulating the problems: - Mixed responsibilities: card, deck, points - Missing responsibilities: total points to the black jack game? - Limit reuse: can't use for cribbage for example - Not substitutable: can't use a shoe in the place of a card - Haphazard interface: (?) should be iterable. Goals for our solid design: prevent problems, and differentiate changes that would be relevant. | 3.924211 | 4 |
Weather_Station/test_weather_station.py | opendatadurban/citizen_sensors | 1 | 6619974 | '''
Weather station:
One script to rule them all...
HMH - 18/07/2018
'''
import sys,time,os
import Adafruit_DHT, Adafruit_MCP3008
import Adafruit_GPIO.SPI as SPI
import RPi.GPIO as GPIO
import spidev
import numpy as np
from gpiozero import DigitalInputDevice
from time import sleep
#import math
#import subprocess
import datetime,requests,json
import smtplib
from email.mime.text import MIMEText
import simple_read_windspeed as srw
#import analog_read as ar
try:
import aqi
except:
print 'USB not connected'
import platform, string
def sendemail(from_addr, to_addr_list,
              subject, message,
              login, password,
              smtpserver='smtp.gmail.com:587'):
    """Send a plain-text email via an authenticated STARTTLS SMTP server.

    Returns the dict of refused recipients from smtplib's sendmail
    (empty on full success).
    """
    header = 'From: %s\n' % from_addr
    header += 'To: %s\n' % ','.join(to_addr_list)
    header += 'Subject: %s\n\n' % subject
    message = header + message
    server = smtplib.SMTP(smtpserver)
    server.starttls()
    server.login(login,password)
    problems = server.sendmail(from_addr, to_addr_list, message)
    server.quit()
    return problems
def get_temp_hum(sensor,pin):
    """Read the DHT sensor 10 times; return median (humidity, temperature)."""
    t_array = np.zeros(10)
    h_array = np.zeros(10)
    for i in range(0,len(t_array)):
        # read_retry blocks until the sensor answers (or gives up).
        h_array[i], t_array[i] = Adafruit_DHT.read_retry(sensor, pin)
    humidity = np.median(h_array)
    temperature = np.median(t_array)
    return humidity, temperature
def windspeed_helper():
    """Sample the anemometer for ~30 s; return the list of instantaneous speeds."""
    count = 0
    wind_speed_sensor = srw.DigitalInputDevice(5)
    wind_speed_sensor.when_activated = srw.spin
    time_interval = 0.5*60 # seconds
    time_later = time.time()
    timestamp = time.time()
    wind_array = []
    while time_later < timestamp + time_interval:
        srw.count = 0
        srw.sleep(5)
        instantaneous_windspeed = srw.get_windspeed()
        # NOTE(review): `count` is never modified, so this branch is dead
        # code -- confirm whether it was meant to test srw.count instead.
        if count == 1:
            instantaneous_windspeed = 0.0
        wind_array.append(instantaneous_windspeed)
        time_later = time.time()
    #windspeed = srw.calculate_speed(5)
    #wind_array = simple_read_windspeed.wind_val
    #windspeed = np.mean(wind_array)
    #print "value from anemometer: ",wind_array
    return wind_array
def dust_helper():
    """Wake the dust sensor, take up to 15 readings, put it back to sleep.

    Returns (pm10_list, pm25_list) of particulate readings.
    """
    pm25 = []
    pm10 = []
    aqi.cmd_set_sleep(0)
    aqi.cmd_set_mode(1);
    for t in range(15):
        values = aqi.cmd_query_data();
        if values is not None:
            pm25.append(values[0])
            pm10.append(values[1])
        time.sleep(2)
    #print pm10
    #print pm25
    #print("Going to sleep for 5min...")
    aqi.cmd_set_mode(0);
    aqi.cmd_set_sleep()
    #time.sleep(300)
    return pm10,pm25
def read_analog(numSamples,pinVal):
    """Sample MCP3008 ADC channel *pinVal* `numSamples` times (~1 Hz).

    Pulses the LED pin around each sample (dust-sensor style drive) and
    returns the list of measured voltages on a 0-5 V scale.
    """
    #Hardware SPI configuration:
    SPI_PORT = 0
    SPI_DEVICE = 0
    mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
    # Choose GPIO pin - not actually sure if we need this, but leaving it in for meow
    ledPin = 18
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(ledPin,GPIO.OUT)
    samplingTime = 280.0
    deltaTime = 40.0
    sleepTime = 9680.0
    return_array = []
    try:
        for i in range(0,numSamples):
            GPIO.output(ledPin,0)
            time.sleep(samplingTime*10.0**-6)
            # The read_adc function will get the value of the specified channel
            voMeasured = mcp.read_adc(pinVal)
            time.sleep(samplingTime*10.0**-6)
            GPIO.output(ledPin,1)
            time.sleep(samplingTime*10.0**-6)
            # Scale the 10-bit ADC reading (0-1023) to a 0-5 V voltage.
            calcVoltage = voMeasured*(5.0/1024)
            return_array.append(calcVoltage)
            time.sleep(1)
    except KeyboardInterrupt:
        GPIO.cleanup()
    return return_array
if __name__=="__main__":
    # Startup: open the error log, announce the reboot by email, and
    # initialise per-sensor failure flags (0 = no failure email sent yet).
    error_log_name = 'error_log.txt'
    erf = open(error_log_name,'a')
    myname = os.uname()[1]
    try:
        # Send email to let human know I'm alive
        sendemail(from_addr = '<EMAIL>',
            to_addr_list = ['<EMAIL>'],
            subject = 'System has restarted',
            message = 'Weather station '+myname+' has rebooted and the script is running!',
            login = 'oddweatherstation',
            password = '<PASSWORD>')
    except Exception as e:
        print "Gmail doesn't like the machine"
        etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
        erf.write(etime)
        erf.write('\n')
        erf.write(str(e))
        erf.write('\n')
        erf.close()
    print "Welcome to your local weather station. Sit back, relax, and have the weather measured at you. Some of the measurements take some time, so if it looks like nothing is happening, chill for a while. If nothing continues to happen, then perhaps something strange is on your foot."
    # set operations flags:
    Temp_flag = 0
    WS_flag = 0
    WD_flag = 0
    Gas_flag = 0
    Dust_flag = 0
    data_loc = '/home/pi/Desktop/Weather_Station/data/'
    p = platform.system()
    if p == 'Windows':
        # Flip the path separators when developing on Windows.
        data_loc = string.replace(data_loc,'/','\\')
while Zuma == 'notmypresident': #notmypresident
timestamp = time.time() # UTC
file_time = datetime.datetime.fromtimestamp(timestamp).strftime('%Y_%m_%d_%H_%M_%S')
file_name = data_loc+'data_'+file_time+'.txt'
f = open(file_name,'a')
erf = open(error_log_name,'a')
time_interval = 24*60*60 # seconds
time_later = time.time()
while time_later < timestamp + time_interval:
# Temperature and humidity:
m_time = time.time()
print "The time is...:", m_time
print "Yeah... bet you can read that..."
print "Checking temperature and humidity"
try:
sensor2 = Adafruit_DHT.DHT22
pin2=24
humidity, temperature = get_temp_hum(sensor2,pin2)
print 'Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity)
except Exception as e:
print 'Failed to get temperature and humidity reading'
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Temp_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Temperature sensor down',
message = 'Weather station '+myname+' temperature gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
Temp_flag = 1
except:
print "Gmail doesn't like the machine"
# Gas
print "Smelling gas"
try:
gas_array = read_analog(numSamples=10,pinVal=1)
#print gas_array
gas = np.mean(gas_array)
print 'Gas = ',gas
except Exception as e:
print "We have a gas issue..."
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Gas_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Gas sensor down',
message = 'Weather station '+myname+' gas gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
Gas_flag = 1
except:
print "Gmail doesn't like the machine"
# Dust
print "Eating dust"
try:
pm10_array,pm25_array = dust_helper()
pm10 = np.median(pm10_array) # 10 microns
pm25 = np.median(pm25_array) # 2.5 microns
print 'pm 2.5 = {0:0.1f}, pm 10 = {1:0.1f}'.format(pm25,pm10)
#print 'chilling for a while'
#time.sleep(300) # this can be removed once the timing is sorted out - just here for now to stop the fan spinning up every 3 seconds
except Exception as e:
print"We are but shadows and dust, but not dust in the wind."
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Dust_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Dust sensor down',
message = 'Weather station '+myname+' dust gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
Dust_flag = 1
except:
print "Gmail doesn't like the machine"
# Run wind stuff for 300 seconds...
# Windspeed
print "Checking wind speed"
try:
windspeed_array = windspeed_helper()
windspeed = np.median(windspeed_array)
print 'Wind={0:0.1f} kph'.format(windspeed)
except Exception as e:
print 'Wind failed to pass'
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if WS_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Wind speed sensor down',
message = 'Weather station '+myname+' windspeed gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
WS_flag = 1
except:
print "Gmail doesn't like the machine"
# Wind Direction
print "Checking wind direction"
try:
wind_dir_array = read_analog(numSamples=10,pinVal=3)
winddir = np.median(wind_dir_array)
print 'Wind direction = {0:0.1f}'.format(winddir)
except Exception as e:
print "the wind is lacking direction"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if WD_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Wind direction sensor down',
message = 'Weather station '+myname+' wind direction gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
WD_flag = 1
except:
print "Gmail doesn't like the machine"
'''
print 'recording data'
line = str(temperature)+','+str(humidity)+','+str(windspeed)+','+str(winddir)+','+str(gas)+','+str(pm10)+','+str(pm25)+','+str(m_time)
f.write(line)
f.write('\n')
print 'talking to server'
# post to the village
payload = {'temp': temperature,'humid':humidity,'rain' : 0.0, 'press': 0.0}
headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
try:
r = requests.post("http://citizen-sensors.herokuapp.com/ewok-village-5000", data=json.dumps(payload),headers=headers)
except Exception as e:
print "Server not listening to me - no one ever listens to me!!!"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
time.sleep(10)
time_later = time.time()
'''
f.close()
erf.close()
| '''
Weather station:
One script to rule them all...
HMH - 18/07/2018
'''
import sys,time,os
import Adafruit_DHT, Adafruit_MCP3008
import Adafruit_GPIO.SPI as SPI
import RPi.GPIO as GPIO
import spidev
import numpy as np
from gpiozero import DigitalInputDevice
from time import sleep
#import math
#import subprocess
import datetime,requests,json
import smtplib
from email.mime.text import MIMEText
import simple_read_windspeed as srw
#import analog_read as ar
try:
import aqi
except:
print 'USB not connected'
import platform, string
def sendemail(from_addr, to_addr_list,
subject, message,
login, password,
smtpserver='smtp.gmail.com:587'):
header = 'From: %s\n' % from_addr
header += 'To: %s\n' % ','.join(to_addr_list)
header += 'Subject: %s\n\n' % subject
message = header + message
server = smtplib.SMTP(smtpserver)
server.starttls()
server.login(login,password)
problems = server.sendmail(from_addr, to_addr_list, message)
server.quit()
return problems
def get_temp_hum(sensor,pin):
t_array = np.zeros(10)
h_array = np.zeros(10)
for i in range(0,len(t_array)):
h_array[i], t_array[i] = Adafruit_DHT.read_retry(sensor, pin)
humidity = np.median(h_array)
temperature = np.median(t_array)
return humidity, temperature
def windspeed_helper():
count = 0
wind_speed_sensor = srw.DigitalInputDevice(5)
wind_speed_sensor.when_activated = srw.spin
time_interval = 0.5*60 # seconds
time_later = time.time()
timestamp = time.time()
wind_array = []
while time_later < timestamp + time_interval:
srw.count = 0
srw.sleep(5)
instantaneous_windspeed = srw.get_windspeed()
if count == 1:
instantaneous_windspeed = 0.0
wind_array.append(instantaneous_windspeed)
time_later = time.time()
#windspeed = srw.calculate_speed(5)
#wind_array = simple_read_windspeed.wind_val
#windspeed = np.mean(wind_array)
#print "value from anemometer: ",wind_array
return wind_array
def dust_helper():
pm25 = []
pm10 = []
aqi.cmd_set_sleep(0)
aqi.cmd_set_mode(1);
for t in range(15):
values = aqi.cmd_query_data();
if values is not None:
pm25.append(values[0])
pm10.append(values[1])
time.sleep(2)
#print pm10
#print pm25
#print("Going to sleep for 5min...")
aqi.cmd_set_mode(0);
aqi.cmd_set_sleep()
#time.sleep(300)
return pm10,pm25
def read_analog(numSamples,pinVal):
#Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
# Choose GPIO pin - not actually sure if we need this, but leaving it in for meow
ledPin = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(ledPin,GPIO.OUT)
samplingTime = 280.0
deltaTime = 40.0
sleepTime = 9680.0
return_array = []
try:
for i in range(0,numSamples):
GPIO.output(ledPin,0)
time.sleep(samplingTime*10.0**-6)
# The read_adc function will get the value of the specified channel
voMeasured = mcp.read_adc(pinVal)
time.sleep(samplingTime*10.0**-6)
GPIO.output(ledPin,1)
time.sleep(samplingTime*10.0**-6)
calcVoltage = voMeasured*(5.0/1024)
return_array.append(calcVoltage)
time.sleep(1)
except KeyboardInterrupt:
GPIO.cleanup()
return return_array
if __name__=="__main__":
error_log_name = 'error_log.txt'
erf = open(error_log_name,'a')
myname = os.uname()[1]
try:
# Send email to let human know I'm alive
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'System has restarted',
message = 'Weather station '+myname+' has rebooted and the script is running!',
login = 'oddweatherstation',
password = '<PASSWORD>')
except Exception as e:
print "Gmail doesn't like the machine"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
erf.close()
print "Welcome to your local weather station. Sit back, relax, and have the weather measured at you. Some of the measurements take some time, so if it looks like nothing is happening, chill for a while. If nothing continues to happen, then perhaps something strange is on your foot."
# set operations flags:
Temp_flag = 0
WS_flag = 0
WD_flag = 0
Gas_flag = 0
Dust_flag = 0
data_loc = '/home/pi/Desktop/Weather_Station/data/'
p = platform.system()
if p == 'Windows':
data_loc = string.replace(data_loc,'/','\\')
Zuma = 'notmypresident'
while Zuma == 'notmypresident': #notmypresident
timestamp = time.time() # UTC
file_time = datetime.datetime.fromtimestamp(timestamp).strftime('%Y_%m_%d_%H_%M_%S')
file_name = data_loc+'data_'+file_time+'.txt'
f = open(file_name,'a')
erf = open(error_log_name,'a')
time_interval = 24*60*60 # seconds
time_later = time.time()
while time_later < timestamp + time_interval:
# Temperature and humidity:
m_time = time.time()
print "The time is...:", m_time
print "Yeah... bet you can read that..."
print "Checking temperature and humidity"
try:
sensor2 = Adafruit_DHT.DHT22
pin2=24
humidity, temperature = get_temp_hum(sensor2,pin2)
print 'Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity)
except Exception as e:
print 'Failed to get temperature and humidity reading'
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Temp_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Temperature sensor down',
message = 'Weather station '+myname+' temperature gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
Temp_flag = 1
except:
print "Gmail doesn't like the machine"
# Gas
print "Smelling gas"
try:
gas_array = read_analog(numSamples=10,pinVal=1)
#print gas_array
gas = np.mean(gas_array)
print 'Gas = ',gas
except Exception as e:
print "We have a gas issue..."
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Gas_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Gas sensor down',
message = 'Weather station '+myname+' gas gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
Gas_flag = 1
except:
print "Gmail doesn't like the machine"
# Dust
print "Eating dust"
try:
pm10_array,pm25_array = dust_helper()
pm10 = np.median(pm10_array) # 10 microns
pm25 = np.median(pm25_array) # 2.5 microns
print 'pm 2.5 = {0:0.1f}, pm 10 = {1:0.1f}'.format(pm25,pm10)
#print 'chilling for a while'
#time.sleep(300) # this can be removed once the timing is sorted out - just here for now to stop the fan spinning up every 3 seconds
except Exception as e:
print"We are but shadows and dust, but not dust in the wind."
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Dust_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Dust sensor down',
message = 'Weather station '+myname+' dust gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
Dust_flag = 1
except:
print "Gmail doesn't like the machine"
# Run wind stuff for 300 seconds...
# Windspeed
print "Checking wind speed"
try:
windspeed_array = windspeed_helper()
windspeed = np.median(windspeed_array)
print 'Wind={0:0.1f} kph'.format(windspeed)
except Exception as e:
print 'Wind failed to pass'
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if WS_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Wind speed sensor down',
message = 'Weather station '+myname+' windspeed gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
WS_flag = 1
except:
print "Gmail doesn't like the machine"
# Wind Direction
print "Checking wind direction"
try:
wind_dir_array = read_analog(numSamples=10,pinVal=3)
winddir = np.median(wind_dir_array)
print 'Wind direction = {0:0.1f}'.format(winddir)
except Exception as e:
print "the wind is lacking direction"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if WD_flag == 0:
try:
sendemail(from_addr = '<EMAIL>',
to_addr_list = ['<EMAIL>'],
subject = 'Wind direction sensor down',
message = 'Weather station '+myname+' wind direction gauge is not working',
login = 'oddweatherstation',
password = '<PASSWORD>')
WD_flag = 1
except:
print "Gmail doesn't like the machine"
'''
print 'recording data'
line = str(temperature)+','+str(humidity)+','+str(windspeed)+','+str(winddir)+','+str(gas)+','+str(pm10)+','+str(pm25)+','+str(m_time)
f.write(line)
f.write('\n')
print 'talking to server'
# post to the village
payload = {'temp': temperature,'humid':humidity,'rain' : 0.0, 'press': 0.0}
headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
try:
r = requests.post("http://citizen-sensors.herokuapp.com/ewok-village-5000", data=json.dumps(payload),headers=headers)
except Exception as e:
print "Server not listening to me - no one ever listens to me!!!"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
time.sleep(10)
time_later = time.time()
'''
f.close()
erf.close()
| en | 0.49729 | Weather station: One script to rule them all... HMH - 18/07/2018 #import math #import subprocess #import analog_read as ar # seconds #windspeed = srw.calculate_speed(5) #wind_array = simple_read_windspeed.wind_val #windspeed = np.mean(wind_array) #print "value from anemometer: ",wind_array #print pm10 #print pm25 #print("Going to sleep for 5min...") #time.sleep(300) #Hardware SPI configuration: # Choose GPIO pin - not actually sure if we need this, but leaving it in for meow # The read_adc function will get the value of the specified channel # Send email to let human know I'm alive # set operations flags: #notmypresident # UTC # seconds # Temperature and humidity: # Gas #print gas_array # Dust # 10 microns # 2.5 microns #print 'chilling for a while' #time.sleep(300) # this can be removed once the timing is sorted out - just here for now to stop the fan spinning up every 3 seconds # Run wind stuff for 300 seconds... # Windspeed # Wind Direction print 'recording data' line = str(temperature)+','+str(humidity)+','+str(windspeed)+','+str(winddir)+','+str(gas)+','+str(pm10)+','+str(pm25)+','+str(m_time) f.write(line) f.write('\n') print 'talking to server' # post to the village payload = {'temp': temperature,'humid':humidity,'rain' : 0.0, 'press': 0.0} headers = {'Content-Type': 'application/json', 'Accept':'application/json'} try: r = requests.post("http://citizen-sensors.herokuapp.com/ewok-village-5000", data=json.dumps(payload),headers=headers) except Exception as e: print "Server not listening to me - no one ever listens to me!!!" etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') erf.write(etime) erf.write('\n') erf.write(str(e)) erf.write('\n') time.sleep(10) time_later = time.time() | 2.617485 | 3 |
src/xml2txt.py | psorianom/pseudo-de-pseudonymizer | 2 | 6619975 | '''Transforms XML files into format CoNLL (one token per line)
Usage:
xml2txt.py <i> <o> [options]
Arguments:
<i> An input file or directory (if dir it will convert all txt files inside).
<o> An output directory.
--num=<n> NUM Number of decisions [default: 200]
-r RATIO Ratio train/dev/test [default: 60/20/20]
'''
import os
import xml.etree.ElementTree
import glob
import logging
from argopt import argopt
import numpy as np
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def generate_datasets(array_documents, ratio):
splitted = np.array([int(d) for d in ratio.split("/")]) / 100
if len(splitted) != 3:
logger.error("We need three integers as ratio!!")
exit(-1)
r_train, r_dev, r_test = splitted
n_docs = len(array_documents)
all_indices = np.arange(len(array_documents))
train_indices = np.random.choice(all_indices, int(r_train * n_docs), replace=False)
rest_indices = np.setdiff1d(all_indices, train_indices)
dev_indices = np.random.choice(rest_indices, int(r_dev * n_docs), replace=False)
test_indices = np.setdiff1d(rest_indices, dev_indices)
assert (len(np.intersect1d(train_indices, dev_indices)) == 0)
assert (len(np.intersect1d(train_indices, test_indices)) == 0)
assert (len(np.intersect1d(test_indices, dev_indices)) == 0)
return array_documents[train_indices], array_documents[dev_indices], array_documents[test_indices]
def xml2txt(files_to_treat, label, output_path):
# files_to_treat = np.array(list(glob.glob('../data/dev/*.xml')))
n_files = len(files_to_treat)
output_file = os.path.join(output_path, label + ".txt")
with open(output_file, "w") as filo:
for i,f in enumerate(files_to_treat):
print("Treating file {0} => {1}/{2}\n".format(f, i+1 , n_files))
e = xml.etree.ElementTree.parse(f).getroot()
try:
text = [t for t in e.find("TEXTE").find("BLOC_TEXTUEL").find("CONTENU").itertext()]
space_text = "\n".join(text)
filo.write("".join(space_text) + "\n")
except Exception as e:
print("Could not parse file {}\n because {}".format(f, e))
if __name__ == '__main__':
parser = argopt(__doc__).parse_args()
input_path = parser.i
output_path = parser.o
number_decisions = int(parser.num)
if parser.r:
ratio = parser.r
# all_files = np.array(list(glob.glob('../data/dev/*.xml')))
all_files = np.array(list(glob.glob(input_path)))
# take sample
all_files = np.random.choice(all_files, number_decisions, replace=False)
train, dev, test = generate_datasets(all_files, ratio)
for t, l in zip([train, dev, test], ["train", "dev", "test"]):
xml2txt(t, l, output_path)
| '''Transforms XML files into format CoNLL (one token per line)
Usage:
xml2txt.py <i> <o> [options]
Arguments:
<i> An input file or directory (if dir it will convert all txt files inside).
<o> An output directory.
--num=<n> NUM Number of decisions [default: 200]
-r RATIO Ratio train/dev/test [default: 60/20/20]
'''
import os
import xml.etree.ElementTree
import glob
import logging
from argopt import argopt
import numpy as np
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def generate_datasets(array_documents, ratio):
splitted = np.array([int(d) for d in ratio.split("/")]) / 100
if len(splitted) != 3:
logger.error("We need three integers as ratio!!")
exit(-1)
r_train, r_dev, r_test = splitted
n_docs = len(array_documents)
all_indices = np.arange(len(array_documents))
train_indices = np.random.choice(all_indices, int(r_train * n_docs), replace=False)
rest_indices = np.setdiff1d(all_indices, train_indices)
dev_indices = np.random.choice(rest_indices, int(r_dev * n_docs), replace=False)
test_indices = np.setdiff1d(rest_indices, dev_indices)
assert (len(np.intersect1d(train_indices, dev_indices)) == 0)
assert (len(np.intersect1d(train_indices, test_indices)) == 0)
assert (len(np.intersect1d(test_indices, dev_indices)) == 0)
return array_documents[train_indices], array_documents[dev_indices], array_documents[test_indices]
def xml2txt(files_to_treat, label, output_path):
# files_to_treat = np.array(list(glob.glob('../data/dev/*.xml')))
n_files = len(files_to_treat)
output_file = os.path.join(output_path, label + ".txt")
with open(output_file, "w") as filo:
for i,f in enumerate(files_to_treat):
print("Treating file {0} => {1}/{2}\n".format(f, i+1 , n_files))
e = xml.etree.ElementTree.parse(f).getroot()
try:
text = [t for t in e.find("TEXTE").find("BLOC_TEXTUEL").find("CONTENU").itertext()]
space_text = "\n".join(text)
filo.write("".join(space_text) + "\n")
except Exception as e:
print("Could not parse file {}\n because {}".format(f, e))
if __name__ == '__main__':
parser = argopt(__doc__).parse_args()
input_path = parser.i
output_path = parser.o
number_decisions = int(parser.num)
if parser.r:
ratio = parser.r
# all_files = np.array(list(glob.glob('../data/dev/*.xml')))
all_files = np.array(list(glob.glob(input_path)))
# take sample
all_files = np.random.choice(all_files, number_decisions, replace=False)
train, dev, test = generate_datasets(all_files, ratio)
for t, l in zip([train, dev, test], ["train", "dev", "test"]):
xml2txt(t, l, output_path)
| en | 0.325271 | Transforms XML files into format CoNLL (one token per line) Usage: xml2txt.py <i> <o> [options] Arguments: <i> An input file or directory (if dir it will convert all txt files inside). <o> An output directory. --num=<n> NUM Number of decisions [default: 200] -r RATIO Ratio train/dev/test [default: 60/20/20] # files_to_treat = np.array(list(glob.glob('../data/dev/*.xml'))) # all_files = np.array(list(glob.glob('../data/dev/*.xml'))) # take sample | 3.08189 | 3 |
tests/test_mgit.py | zsimic/m | 3 | 6619976 | import os
import pytest
import runez
import mgit
def test_edge_cases():
assert mgit.git_parent_path("/") is None
assert mgit.git_parent_path(runez.DEV.tests_folder) == runez.DEV.project_folder
prefs = mgit.MgitPreferences(all=True, fetch=False, pull=False, short=None)
assert str(prefs) == "align all !fetch !pull !verbose"
prefs = mgit.MgitPreferences(name_size=5)
prefs.fetch = None
assert str(prefs) == "name_size=5"
prefs = mgit.MgitPreferences()
assert not str(prefs)
with pytest.raises(Exception):
prefs.update(foo=1)
def test_usage(cli):
cli.expect_success("--help")
cli.expect_success("--version")
cli.expect_failure("--foo", "No such option")
def test_status(cli):
# Note: using explicit lists below, to support case where used directory path may have a space in it
# [wouldn't work if args passed as string, due to naive split in run()]
# Status on a non-existing folder should fail
cli.expect_failure("foo", "No folder 'foo'")
# Status on this test folder should succeed and report no git folders found
cli.expect_success(cli.tests_folder, "no git folders")
# Status on project folder should succeed (we're not calling fetch)
project = runez.DEV.project_folder
cli.expect_success(project, "mgit")
with runez.CurrentFolder(project):
cli.run()
assert cli.succeeded
assert "%s:" % os.path.basename(project) in cli.logged.stdout
cli.expect_success("-cs")
| import os
import pytest
import runez
import mgit
def test_edge_cases():
assert mgit.git_parent_path("/") is None
assert mgit.git_parent_path(runez.DEV.tests_folder) == runez.DEV.project_folder
prefs = mgit.MgitPreferences(all=True, fetch=False, pull=False, short=None)
assert str(prefs) == "align all !fetch !pull !verbose"
prefs = mgit.MgitPreferences(name_size=5)
prefs.fetch = None
assert str(prefs) == "name_size=5"
prefs = mgit.MgitPreferences()
assert not str(prefs)
with pytest.raises(Exception):
prefs.update(foo=1)
def test_usage(cli):
cli.expect_success("--help")
cli.expect_success("--version")
cli.expect_failure("--foo", "No such option")
def test_status(cli):
# Note: using explicit lists below, to support case where used directory path may have a space in it
# [wouldn't work if args passed as string, due to naive split in run()]
# Status on a non-existing folder should fail
cli.expect_failure("foo", "No folder 'foo'")
# Status on this test folder should succeed and report no git folders found
cli.expect_success(cli.tests_folder, "no git folders")
# Status on project folder should succeed (we're not calling fetch)
project = runez.DEV.project_folder
cli.expect_success(project, "mgit")
with runez.CurrentFolder(project):
cli.run()
assert cli.succeeded
assert "%s:" % os.path.basename(project) in cli.logged.stdout
cli.expect_success("-cs")
| en | 0.896346 | # Note: using explicit lists below, to support case where used directory path may have a space in it # [wouldn't work if args passed as string, due to naive split in run()] # Status on a non-existing folder should fail # Status on this test folder should succeed and report no git folders found # Status on project folder should succeed (we're not calling fetch) | 1.894949 | 2 |
decipher/export_patients.py | phenopolis/phenopolis | 24 | 6619977 | <reponame>phenopolis/phenopolis
from __future__ import print_function
import sys
import pymongo
import json
import re
host='phenotips.cs.ucl.ac.uk'
conn = pymongo.MongoClient(host=host, port=27017)
db=conn['patients']
headers=["Internal reference number or ID","Chromosome","Start","Genome assembly","Reference allele","Alternate allele","Transcript","Gene name","Intergenic","Chromosomal sex","Open-access consent","Age at last clinical assessment","Prenatal age in weeks","Note","Inheritance","Pathogenicity","Phenotypes","HGVS code","Genotype","Responsible contact"]
#headers=["Internal reference number or ID","Genome assembly","Gene name","Open-access consent","Phenotypes","Responsible contact"]
print(','.join(map(lambda x: '"%s"'%x,headers)))
for p in db.patients.find({'external_id':{'$regex':re.compile('IRDC_.*_LON_.*')}}):
r=dict()
if 'GC' not in p['external_id']: continue
if 'genes' in p:
r["Gene name"]= ', '.join([g['gene'] for g in p['genes']])
else:
r["Gene name"]= ''
r["Internal reference number or ID"]=re.match('.*_(GC.*)',p['external_id']).group(1)
r["Chromosome"]=''
r["Start"]=''
r["Genome assembly"]='GRCh37/hg19'
r["Reference allele"]=''
r["Alternate allele"]=''
r["Transcript"]=''
r["Intergenic"]=''
r["Chromosomal sex"]=''
r["Open-access consent"]='No'
r["Age at last clinical assessment"]=''
r["Prenatal age in weeks"]=''
r["Note"]=''
r["Inheritance"]=''
r["Pathogenicity"]=''
r["Phenotypes"]=', '.join([f['id'] for f in p['features'] if f['observed']=='yes'])
r["HGVS code"]=''
r["Genotype"]=''
r["Responsible contact"]='<NAME>'
print(','.join(['"%s"' % r[k] for k in headers]))
| from __future__ import print_function
import sys
import pymongo
import json
import re
host='phenotips.cs.ucl.ac.uk'
conn = pymongo.MongoClient(host=host, port=27017)
db=conn['patients']
headers=["Internal reference number or ID","Chromosome","Start","Genome assembly","Reference allele","Alternate allele","Transcript","Gene name","Intergenic","Chromosomal sex","Open-access consent","Age at last clinical assessment","Prenatal age in weeks","Note","Inheritance","Pathogenicity","Phenotypes","HGVS code","Genotype","Responsible contact"]
#headers=["Internal reference number or ID","Genome assembly","Gene name","Open-access consent","Phenotypes","Responsible contact"]
print(','.join(map(lambda x: '"%s"'%x,headers)))
for p in db.patients.find({'external_id':{'$regex':re.compile('IRDC_.*_LON_.*')}}):
r=dict()
if 'GC' not in p['external_id']: continue
if 'genes' in p:
r["Gene name"]= ', '.join([g['gene'] for g in p['genes']])
else:
r["Gene name"]= ''
r["Internal reference number or ID"]=re.match('.*_(GC.*)',p['external_id']).group(1)
r["Chromosome"]=''
r["Start"]=''
r["Genome assembly"]='GRCh37/hg19'
r["Reference allele"]=''
r["Alternate allele"]=''
r["Transcript"]=''
r["Intergenic"]=''
r["Chromosomal sex"]=''
r["Open-access consent"]='No'
r["Age at last clinical assessment"]=''
r["Prenatal age in weeks"]=''
r["Note"]=''
r["Inheritance"]=''
r["Pathogenicity"]=''
r["Phenotypes"]=', '.join([f['id'] for f in p['features'] if f['observed']=='yes'])
r["HGVS code"]=''
r["Genotype"]=''
r["Responsible contact"]='<NAME>'
print(','.join(['"%s"' % r[k] for k in headers])) | en | 0.614971 | #headers=["Internal reference number or ID","Genome assembly","Gene name","Open-access consent","Phenotypes","Responsible contact"] | 2.612203 | 3 |
linkedList/reverse-node-in-k.py | windowssocket/py_leetcode | 3 | 6619978 | <reponame>windowssocket/py_leetcode
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reverseKGroup(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
dummy = ListNode(0)
dummy.next = head
head = dummy
while head is not None:
head = self.findnextK(head, k)
return dummy.next
def findnextK(self, head, k):
"""
n0, n1......nk nm
head = nk
currently head: n0
"""
nk = head
for _ in range(k):
nk = nk.next
if nk is None:
return None
nm = nk.next
prev = head.next
curr = head.next.next
prev.next = nm
h1 = prev
while curr is not nm:
tmp = curr.next
curr.next = prev
prev = curr
curr = tmp
head.next = prev
return h1
def printlist(self, head):
curr = head
while curr is not None:
print(curr.val)
curr = curr.next
def reverse_node(self, head):
prev = head
curr = head.next
prev.next = None
while curr is not None:
tmp = curr.next
curr.next = prev
prev = curr
curr = tmp
return prev
def create_list(self):
head = prev = ListNode(1)
for i in range(2, 6):
tmp = ListNode(i)
prev.next = tmp
prev = tmp
return head
s = Solution()
head = s.create_list()
head = s.reverseKGroup(head, 2)
s.printlist(head)
| # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reverseKGroup(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
dummy = ListNode(0)
dummy.next = head
head = dummy
while head is not None:
head = self.findnextK(head, k)
return dummy.next
def findnextK(self, head, k):
"""
n0, n1......nk nm
head = nk
currently head: n0
"""
nk = head
for _ in range(k):
nk = nk.next
if nk is None:
return None
nm = nk.next
prev = head.next
curr = head.next.next
prev.next = nm
h1 = prev
while curr is not nm:
tmp = curr.next
curr.next = prev
prev = curr
curr = tmp
head.next = prev
return h1
def printlist(self, head):
curr = head
while curr is not None:
print(curr.val)
curr = curr.next
def reverse_node(self, head):
prev = head
curr = head.next
prev.next = None
while curr is not None:
tmp = curr.next
curr.next = prev
prev = curr
curr = tmp
return prev
def create_list(self):
head = prev = ListNode(1)
for i in range(2, 6):
tmp = ListNode(i)
prev.next = tmp
prev = tmp
return head
s = Solution()
head = s.create_list()
head = s.reverseKGroup(head, 2)
s.printlist(head) | en | 0.759252 | # Definition for singly-linked list. :type head: ListNode :type k: int :rtype: ListNode n0, n1......nk nm head = nk currently head: n0 | 3.822329 | 4 |
build_extra/rust/run.py | Kachulio1/deno | 0 | 6619979 | <reponame>Kachulio1/deno
#!/usr/bin/env python
# This file just executes its arguments, except that also adds OUT_DIR to the
# environ. This is for compatibility with cargo.
import subprocess
import sys
import os
os.environ["OUT_DIR"] = os.path.abspath(".")
assert os.path.isdir(os.environ["OUT_DIR"])
sys.exit(subprocess.call(sys.argv[1:], env=os.environ))
| #!/usr/bin/env python
# This file just executes its arguments, except that also adds OUT_DIR to the
# environ. This is for compatibility with cargo.
import subprocess
import sys
import os
os.environ["OUT_DIR"] = os.path.abspath(".")
assert os.path.isdir(os.environ["OUT_DIR"])
sys.exit(subprocess.call(sys.argv[1:], env=os.environ)) | en | 0.875238 | #!/usr/bin/env python # This file just executes its arguments, except that also adds OUT_DIR to the # environ. This is for compatibility with cargo. | 2.121354 | 2 |
tests/dedupe/test_linker.py | tendai-zw/followthemoney | 1 | 6619980 | from unittest import TestCase
from followthemoney import model
from followthemoney.dedupe import EntityLinker
class LinkerTestCase(TestCase):
def test_linker(self):
linker = EntityLinker()
self.assertEqual(linker.resolve('a'), 'a')
self.assertEqual(linker.resolve({'id': 'a'}), 'a')
linker.add('a', 'b')
linker.add('a', None)
linker.add('a', 'a')
self.assertEqual(linker.resolve('a'), linker.resolve('b'))
self.assertEqual(linker.resolve('b'), linker.resolve('a'))
linker.add('b', 'c')
self.assertEqual(linker.resolve('a'), linker.resolve('c'))
self.assertEqual(linker.resolve('b'), linker.resolve('c'))
linker.add('b', 'a')
self.assertEqual(linker.resolve('a'), linker.resolve('c'))
self.assertEqual(linker.resolve('b'), linker.resolve('c'))
linker.add('c', 'a')
self.assertEqual(linker.resolve('a'), linker.resolve('c'))
self.assertEqual(linker.resolve('b'), linker.resolve('c'))
linker.add('c', 'd')
self.assertEqual(linker.resolve('a'), linker.resolve('d'))
self.assertEqual(linker.resolve('b'), linker.resolve('d'))
self.assertTrue(linker.has('a'))
self.assertTrue(linker.has('d'))
self.assertFalse(linker.has('x'))
self.assertFalse(linker.has(None))
def test_remove_ns(self):
linker = EntityLinker()
linker.add('a.xxx', 'b.xxx')
self.assertEqual(linker.resolve('b'), linker.resolve('a'))
self.assertEqual(linker.resolve('b'), 'a')
def test_linker_apply(self):
linker = EntityLinker()
linker.add('foo', 'fox')
linker.add('fox', 'bar')
linker.add('qux', 'quux')
entity = model.get_proxy({
'id': 'foo',
'schema': 'Company',
'properties': {
'sameAs': ['qux', 'banana']
}
})
linked = linker.apply(entity)
self.assertEqual(linked.id, 'bar')
self.assertNotIn('bar', linked.get('sameAs'))
self.assertIn('banana', linked.get('sameAs'))
self.assertIn('qux', linked.get('sameAs'))
| from unittest import TestCase
from followthemoney import model
from followthemoney.dedupe import EntityLinker
class LinkerTestCase(TestCase):
def test_linker(self):
linker = EntityLinker()
self.assertEqual(linker.resolve('a'), 'a')
self.assertEqual(linker.resolve({'id': 'a'}), 'a')
linker.add('a', 'b')
linker.add('a', None)
linker.add('a', 'a')
self.assertEqual(linker.resolve('a'), linker.resolve('b'))
self.assertEqual(linker.resolve('b'), linker.resolve('a'))
linker.add('b', 'c')
self.assertEqual(linker.resolve('a'), linker.resolve('c'))
self.assertEqual(linker.resolve('b'), linker.resolve('c'))
linker.add('b', 'a')
self.assertEqual(linker.resolve('a'), linker.resolve('c'))
self.assertEqual(linker.resolve('b'), linker.resolve('c'))
linker.add('c', 'a')
self.assertEqual(linker.resolve('a'), linker.resolve('c'))
self.assertEqual(linker.resolve('b'), linker.resolve('c'))
linker.add('c', 'd')
self.assertEqual(linker.resolve('a'), linker.resolve('d'))
self.assertEqual(linker.resolve('b'), linker.resolve('d'))
self.assertTrue(linker.has('a'))
self.assertTrue(linker.has('d'))
self.assertFalse(linker.has('x'))
self.assertFalse(linker.has(None))
def test_remove_ns(self):
linker = EntityLinker()
linker.add('a.xxx', 'b.xxx')
self.assertEqual(linker.resolve('b'), linker.resolve('a'))
self.assertEqual(linker.resolve('b'), 'a')
def test_linker_apply(self):
linker = EntityLinker()
linker.add('foo', 'fox')
linker.add('fox', 'bar')
linker.add('qux', 'quux')
entity = model.get_proxy({
'id': 'foo',
'schema': 'Company',
'properties': {
'sameAs': ['qux', 'banana']
}
})
linked = linker.apply(entity)
self.assertEqual(linked.id, 'bar')
self.assertNotIn('bar', linked.get('sameAs'))
self.assertIn('banana', linked.get('sameAs'))
self.assertIn('qux', linked.get('sameAs'))
| none | 1 | 2.623357 | 3 | |
py-practice/py-practice-hackerrank/Python/Strings/merge_tools.py | beenorgone-notebook/python-notebook | 0 | 6619981 | <gh_stars>0
'''
https://hackerrank.com/challenges/merge-the-tools
Given a string S of length N. Divide this string into N/K equal parts
thus each part contain exactly K elements.
Let us consider the string thus obtained in part i as Ti.
For each string Ti thus obtained you have to make a modified string
such that each character that occurs in Ti occurs exactly once in the
modified string.
'''
import textwrap
from collections import OrderedDict
S = input().strip()
N = len(S)
K = int(input())
S_parted = textwrap.wrap(S, K)
'''
def str_duplicates_remove(a_string):
return ''.join(sorted(set(a_string), key=a_string.index))
S_modified = map(str_duplicates_remove, S_parted)
'''
S_modified = map(lambda s: ''.join(sorted(set(s), key=s.index)),
S_parted)
for i in S_modified:
print(i)
# We can use OrderedDict instead:
S_modified = map(lambda s: ''.join(OrderedDict.fromkeys(s)),
S_parted)
# Declarative version
s = input().strip()
k = int(input())
i = 0
while i < len(s):
a = s[i:i + k]
output = ''
for x in a:
if x not in output:
output += x
print(output)
i += k
| '''
https://hackerrank.com/challenges/merge-the-tools
Given a string S of length N. Divide this string into N/K equal parts
thus each part contain exactly K elements.
Let us consider the string thus obtained in part i as Ti.
For each string Ti thus obtained you have to make a modified string
such that each character that occurs in Ti occurs exactly once in the
modified string.
'''
import textwrap
from collections import OrderedDict
S = input().strip()
N = len(S)
K = int(input())
S_parted = textwrap.wrap(S, K)
'''
def str_duplicates_remove(a_string):
return ''.join(sorted(set(a_string), key=a_string.index))
S_modified = map(str_duplicates_remove, S_parted)
'''
S_modified = map(lambda s: ''.join(sorted(set(s), key=s.index)),
S_parted)
for i in S_modified:
print(i)
# We can use OrderedDict instead:
S_modified = map(lambda s: ''.join(OrderedDict.fromkeys(s)),
S_parted)
# Declarative version
s = input().strip()
k = int(input())
i = 0
while i < len(s):
a = s[i:i + k]
output = ''
for x in a:
if x not in output:
output += x
print(output)
i += k | en | 0.727405 | https://hackerrank.com/challenges/merge-the-tools Given a string S of length N. Divide this string into N/K equal parts thus each part contain exactly K elements. Let us consider the string thus obtained in part i as Ti. For each string Ti thus obtained you have to make a modified string such that each character that occurs in Ti occurs exactly once in the modified string. def str_duplicates_remove(a_string): return ''.join(sorted(set(a_string), key=a_string.index)) S_modified = map(str_duplicates_remove, S_parted) # We can use OrderedDict instead: # Declarative version | 3.771599 | 4 |
bc/home/migrations/0005_merge_20200117_1726.py | Buckinghamshire-Digital-Service/buckinghamshire-council | 1 | 6619982 | # Generated by Django 2.2.9 on 2020-01-17 17:26
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the two parallel 0004 branches of the
    # "home" app. It carries no schema changes of its own, so operations
    # is deliberately empty.
    dependencies = [
        ("home", "0004_homepage_alert_message"),
        ("home", "0004_redirect_to_external_url"),
    ]
    operations = []
| # Generated by Django 2.2.9 on 2020-01-17 17:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("home", "0004_homepage_alert_message"),
("home", "0004_redirect_to_external_url"),
]
operations = []
| en | 0.801441 | # Generated by Django 2.2.9 on 2020-01-17 17:26 | 1.315489 | 1 |
tests/test_Operation.py | kdungs/lhcb-hltflow | 1 | 6619983 | <reponame>kdungs/lhcb-hltflow
import unittest
from hltflow.core import Operation
class TestOperation(unittest.TestCase):
    """Unit tests for hltflow.core.Operation's line-classification helpers."""

    def testIsCut(self):
        """
        Iff a line begins with an opening parenthesis, it's assumed to be a
        cut.
        """
        # Positive case: a realistic LoKi cut expression.
        self.assertTrue(Operation.is_cut(
            '( ( TrPT > %(PT)s * MeV ) & ( TrP > %(P)s * MeV ) )'))
        self.assertFalse(Operation.is_cut('Not a cut'))
    def testIsSink(self):
        """
        Iff a line begins with the word SINK, it's assumed to be a sink.
        """
        self.assertTrue(Operation.is_sink("SINK( 'Hlt1%(name)sDecision' )"))
        self.assertFalse(Operation.is_sink('Not a sink'))
| import unittest
from hltflow.core import Operation
class TestOperation(unittest.TestCase):
def testIsCut(self):
"""
Iff a line begins with an opening parenthesis, it's assumed to be a
cut.
"""
self.assertTrue(Operation.is_cut(
'( ( TrPT > %(PT)s * MeV ) & ( TrP > %(P)s * MeV ) )'))
self.assertFalse(Operation.is_cut('Not a cut'))
def testIsSink(self):
"""
Iff a line begins with the word SINK, it's assumed to be a sink.
"""
self.assertTrue(Operation.is_sink("SINK( 'Hlt1%(name)sDecision' )"))
self.assertFalse(Operation.is_sink('Not a sink')) | en | 0.972145 | Iff a line begins with an opening parenthesis, it's assumed to be a cut. Iff a line begins with the word SINK, it's assumed to be a sink. | 2.997662 | 3 |
helpers/mockdbhelper.py | sabbir360/mvc-flask | 2 | 6619984 | <reponame>sabbir360/mvc-flask<filename>helpers/mockdbhelper.py
MOCK_USERS = [
{"email": '<EMAIL>', "password": '<PASSWORD>'}
]
MOCK_TABLES = [{"_id": "1", "number": "1",
"owner": "<EMAIL>", "url": "/newrequest"}]
class MockDBHelper:
def get_user(self, email):
user = [x for x in MOCK_USERS if x.get("email") == email]
if user:
return user[0]
# if email in MOCK_USERS:
# return MOCK_USERS[email]
return None
def add_user(self, email, password):
MOCK_USERS.append(dict(email=email, password=password))
def add_table(self, number, owner):
MOCK_TABLES.append({"_id": number, "number": number, "owner": owner})
return number
def update_table(self, _id, url):
for table in MOCK_TABLES:
if table.get("_id") == _id:
table["url"] = url
break
def get_tables(self, owner_id):
return MOCK_TABLES
def delete_table(self, table_id):
for i, table in enumerate(MOCK_TABLES):
if table.get("_id") == table_id:
del MOCK_TABLES[i]
break
| MOCK_USERS = [
{"email": '<EMAIL>', "password": '<PASSWORD>'}
]
MOCK_TABLES = [{"_id": "1", "number": "1",
"owner": "<EMAIL>", "url": "/newrequest"}]
class MockDBHelper:
def get_user(self, email):
user = [x for x in MOCK_USERS if x.get("email") == email]
if user:
return user[0]
# if email in MOCK_USERS:
# return MOCK_USERS[email]
return None
def add_user(self, email, password):
MOCK_USERS.append(dict(email=email, password=password))
def add_table(self, number, owner):
MOCK_TABLES.append({"_id": number, "number": number, "owner": owner})
return number
def update_table(self, _id, url):
for table in MOCK_TABLES:
if table.get("_id") == _id:
table["url"] = url
break
def get_tables(self, owner_id):
return MOCK_TABLES
def delete_table(self, table_id):
for i, table in enumerate(MOCK_TABLES):
if table.get("_id") == table_id:
del MOCK_TABLES[i]
break | en | 0.427411 | # if email in MOCK_USERS: # return MOCK_USERS[email] | 2.749955 | 3 |
posts/templatetags/markdown.py | yyyyyyyan/esquer.dev | 9 | 6619985 | <gh_stars>1-10
from django import template
from markdown import Markdown
# Template-tag registry for this module (required by Django's template loader).
register = template.Library()
# One shared converter instance, configured once at import time:
# fenced code blocks, Pygments highlighting without line numbers or
# language guessing, and a table-of-contents extension.
# NOTE(review): the single Markdown instance is reused across requests
# without calling reset(); some extensions keep per-document state between
# convert() calls — confirm this is safe for the enabled extensions.
md = Markdown(
    extensions=["fenced_code", "codehilite", "toc"],
    extension_configs={"codehilite": {"linenums": False, "guess_lang": False}},
    output_format="html5",
)
@register.filter()
def markdown(value):
    # Render a Markdown string to HTML5 for use in templates.
    return md.convert(value)
| from django import template
from markdown import Markdown
register = template.Library()
md = Markdown(
extensions=["fenced_code", "codehilite", "toc"],
extension_configs={"codehilite": {"linenums": False, "guess_lang": False}},
output_format="html5",
)
@register.filter()
def markdown(value):
return md.convert(value) | none | 1 | 2.106337 | 2 | |
shorttimeseries/cli.py | lemon24/shorttimeseries | 0 | 6619986 | import click
from . import parse, InvalidTimestamp
# CLI entry point: parse a short-timeseries file and print one
# "YYYY-MM-DDTHH:MM:SS label" line per entry.
# (No docstring on purpose: click would surface it as --help text.)
@click.command()
@click.argument('file',
    type=click.File())
@click.option('-i', '--initial')
@click.option('-p', '--precision',
    type=click.Choice(['day', 'hour', 'minute', 'second']), default='minute')
def main(file, initial, precision):
    try:
        for timestamp, label in parse(file, initial, precision):
            click.echo("{:%Y-%m-%dT%H:%M:%S} {}".format(timestamp, label))
    except ValueError as e:
        # Bad --initial/--precision combination: report as a parameter error.
        raise click.BadParameter(str(e))
    except InvalidTimestamp as e:
        # Malformed timestamp inside the file: generic CLI error (exit code 1).
        raise click.ClickException(str(e))
if __name__ == '__main__':
    main()
| import click
from . import parse, InvalidTimestamp
@click.command()
@click.argument('file',
type=click.File())
@click.option('-i', '--initial')
@click.option('-p', '--precision',
type=click.Choice(['day', 'hour', 'minute', 'second']), default='minute')
def main(file, initial, precision):
try:
for timestamp, label in parse(file, initial, precision):
click.echo("{:%Y-%m-%dT%H:%M:%S} {}".format(timestamp, label))
except ValueError as e:
raise click.BadParameter(str(e))
except InvalidTimestamp as e:
raise click.ClickException(str(e))
if __name__ == '__main__':
main()
| none | 1 | 2.908466 | 3 | |
Visualization/value_viz.py | robotsorcerer/LevelSetPy | 4 | 6619987 | <gh_stars>1-10
__author__ = "<NAME>"
__copyright__ = "2021, Decomposing Level Sets of PDEs"
__credits__ = "<NAME>, <NAME>"
__license__ = "Lekan License"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Completed"
import time, os
import numpy as np
from os.path import join
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
from LevelSetPy.Utilities.matlab_utils import *
from LevelSetPy.Grids.create_grid import createGrid
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from LevelSetPy.Visualization.mesh_implicit import implicit_mesh
from LevelSetPy.ValueFuncs import proj
from LevelSetPy.Visualization.settings import buffered_axis_limits
class ValueVisualizer(object):
    """Matplotlib-based visualizer for level sets of value functions.

    NOTE(review): `params` is accessed with attribute syntax throughout
    (params.winsize, params.fontdict, ...), so it must be a Bundle-like
    object, not the plain dict suggested by the default — confirm callers.
    """
    # NOTE(review): mutable default argument `params={}` — shared across calls.
    def __init__(self, params={}):
        """
        Use this class to visualize the starting value function's
        zero levelset. If you use a value function different to the
        one specified in the cylinder, you may need to specify the axis
        limits of the plot manually.
        Or send a PR if you can help write a dynamic axes adjuster as the
        level sets' limits varies.
        Copyright (c) <NAME>. https://scriptedonachip.com
        2021.
        """
        plt.ion()
        # NOTE(review): self._fig is only created inside this branch, but the
        # code below (GridSpec, canvas.draw) uses it unconditionally — a falsy
        # params.winsize would raise AttributeError later.
        if params.winsize:
            self.winsize=params.winsize
            # create figure 2 for real-time value function updates
            self._fig = plt.figure(2, figsize=self.winsize)
            # mngr = plt.get_current_fig_manager()
            # geom = mngr.window.geometry()
            # x,y,dx,dy = geom.getRect()
        # 1x3 layout: two 3D panels plus one 2D panel.
        self._gs = gridspec.GridSpec(1, 3, self._fig)
        self._ax_arr = [plt.subplot(self._gs[i], projection='3d') for i in range(2)] + [plt.subplot(self._gs[2])]
        self._labelsize = params.labelsize
        self._init = False
        self.value = params.value if isfield(params, 'value') else False
        self._fontdict = params.fontdict
        self.pause_time = params.pause_time
        self.savedict = params.savedict
        self.params = params
        # Color cycle shared by all plotting calls (3 samples of the ocean map).
        self.color = iter(plt.cm.ocean(np.linspace(0, 1, 3)))
        self._init = False
        if self.savedict.save and not os.path.exists(self.savedict.savepath):
            os.makedirs(self.savedict.savepath)
        # NOTE(review): this plain-dict fallback conflicts with set_title(),
        # which reads self._fontdict.fontsize via attribute access.
        if self._fontdict is None:
            self._fontdict = {'fontsize':12, 'fontweight':'bold'}
        if params.init_conditions or self.value:
            # NOTE(review): `value` is undefined here (NameError); this was
            # presumably meant to be self.value, likewise on the next line.
            assert isinstance(value, np.ndarray), "value function must be an ndarray."
            self.init_projections(value.ndim)
        else:
            # fig 1 for initial value set
            self._fig_init_val = plt.figure(1, figsize=(16, 9))
            # # https://stackoverflow.com/questions/7449585/how-do-you-set-the-absolute-position-of-figure-windows-with-matplotlib
            # mngr = self._fig_init_val.get_current_fig_manager()
            # geom = mngr.window.geometry()
            # x,y,dx,dy = geom.getRect()
            # mngr.window.setGeometry(100, 200, 640, 580)
            self.viz_init_valueset(params, 0)
            self._fig_init_val.canvas.draw()
            self._fig_init_val.canvas.flush_events()
        self._fig.canvas.draw()
        self._fig.canvas.flush_events()
    def init_projections(self, value_dims, ax_idx = 0):
        """
        Initialize plots based off the length of the data array.
        """
        if self.params.init_projections:
            if value_dims==2:
                # NOTE(review): self._gs_plots is never defined on this class
                # (only self._gs is) — this branch raises AttributeError.
                self._ax_arr[ax_idx] = plt.subplot(self._gs_plots[0])
                self._ax_arr[ax_idx].plot(self.value[0], self.value[1], 'b*') #,markersize=15,linewidth=3, label='Target')
            elif value_dims==3:
                self._ax_arr[ax_idx] = plt.subplot(self._gs_plots[0], projection='3d')
                self._ax_arr[ax_idx].plot(self.value[0], self.value[1], self.value[2], 'b*') #,markersize=15,linewidth=3, label='Target')
            self._ax_arr[ax_idx].set_title('Initial Projections', fontdict=self._fontdict)
            ax_idx += 1
        else:
            value_data = self.params.value_data
            if isfield(self.params, 'grid_bundle') and isbundle(self.params.grid_bundle, self.params):
                g = self.params.grid_bundle
            else: # create the grid
                N = np.asarray(size(value_data)).T
                g = createGrid(np.ones(value_data.ndim, 1), N, N)
            if g.dim != value_data.ndim and g.dim+1 != value_data.ndim:
                raise ValueError('Grid dimension is inconsistent with data dimension!')
            if g.dim == value_data.ndim:
                # NOTE(review): init_valueset is not defined on this class;
                # probably meant viz_init_valueset.
                self.init_valueset(g, value_data, ax_idx)
        self._ax_arr[ax_idx].set_title('Initial Value Set', fontdict=self._fontdict)
        self._ax_arr[ax_idx].xaxis.set_tick_params(labelsize=self._labelsize)
        self._ax_arr[ax_idx].yaxis.set_tick_params(labelsize=self._labelsize)
        self._ax_arr[ax_idx].grid('on')
        # NOTE(review): Axes.legend() has no `fontdict` keyword — TypeError;
        # font properties belong in `prop=` or rcParams.
        self._ax_arr[ax_idx].legend(loc='best', fontdict = self._fontdict)
        self._init = True
    def viz_init_valueset(self, params, ax_idx=0):
        # Draw the initial value set, dispatching on the grid dimension.
        g = params.grid_bundle
        data = params.value_data
        self._init = True
        if g.dim<2:
            # init_ax = self._fig_init_val.gca(projection='3d')
            self._ax_arr[0].plot(g.xs[0], data, linestyle='-', color=next(self.color))
            self._ax_arr[0].plot(g.xs[0], np.zeros(size(g.xs[0])), linestyle=':', color='k')
        elif g.dim==2:
            # init_ax = self._fig_init_val.axes(projection='3d')
            self._ax_arr[0].contourf(g.xs[0], g.xs[1], self.value, levels=self.params.level, colors=next(self.color))
            # NOTE(review): `self.title` does not exist (only set_title) and
            # `init_ax` is commented out above — this line raises at runtime.
            self.title(init_ax, title=f'Initial {self.params.level}-Value Set')# init_ax.set_xlabel('X', fontdict=self.fontdict)
        elif g.dim == 3:
            spacing = tuple(g.dx.flatten().tolist())
            mesh = implicit_mesh(data, level=self.params.level, spacing=spacing, edge_color='k', face_color='r')
            self.show_3d(g, mesh.mesh, self._ax_arr[0], spacing)
            xlim, ylim, zlim = self.get_lims(mesh)
            self._ax_arr[0].set_xlim(*xlim)
            self._ax_arr[0].set_ylim(*ylim)
            self._ax_arr[0].set_zlim(*zlim)
            self.set_title(self._ax_arr[0], title=f'Starting {self.params.level}-level Value Set')
        elif g.dim == 4:
            # This is useful for the temporal-axis and 3 Cartesian Coordinates
            'Take 6 slice snapshots and show me the 3D projections'
            N=6
            gs = gridspec.GridSpec(2, 3, self._fig_init_val)
            ax = [plt.subplot(gs[i], projection='3d') for i in range(N)]
            for slice_idx in range(N):
                ax[slice_idx] = plt.subplot(gs[slice_idx], projection='3d')
                xs = g.min[g.dim] + slice_idx/(N+1) * (g.max[g.dim] - g.min[g.dim])
                dim = [0, 0, 0, 1]
                g3D, mesh3D = proj(g, data, dim, xs)
                # NOTE(review): `color` and `spacing` are undefined in this
                # branch, and show_3d takes 4 args, not 5 — this call raises.
                self.show_3d(g3D, mesh3D, ax[slice_idx], color, spacing)
                # NOTE(review): set_title expects an Axes, but ax_idx is an int.
                self.set_title(ax_idx, f"Projected Slice {g.dim} of Initial Value Function Snapshot {slice_idx}.")
        if self.savedict.save:
            self._fig_init_val.savefig(join(self.savedict.savepath,self.savedict.savename),
                            bbox_inches='tight',facecolor='None')
    def show_3d(self, g, mesh, ax_idx, spacing):
        # Add one mesh (or a list of raw arrays to be meshed) to the axis
        # and set a fixed camera angle.
        # ax_idx.plot3D(g.xs[0].flatten(), g.xs[1].flatten(), g.xs[2].flatten(), color=next(self.color))
        if isinstance(mesh, list):
            for m in mesh:
                m = implicit_mesh(m, level=self.params.level, spacing=spacing, edge_color='k', face_color='r')
                ax_idx.add_collection3d(m)
        else:
            ax_idx.add_collection3d(mesh)
        ax_idx.view_init(elev=30., azim=10.)
    def set_title(self, ax, title):
        # NOTE(review): attribute access requires self._fontdict to be a
        # Bundle; the plain-dict fallback set in __init__ would break here.
        ax.set_title(title)
        ax.title.set_fontsize(self._fontdict.fontsize)
        ax.title.set_fontweight(self._fontdict.fontweight)
    def add_legend(self, linestyle, marker, color, label):
        # NOTE(review): self._ax_legend is never assigned in this class —
        # confirm where it is expected to come from before calling this.
        self._ax_legend.plot([], [], linestyle=linestyle, marker=marker,
                             color=color, label=label)
        self._ax_legend.legend(ncol=2, mode='expand', fontsize=10)
    def viz_value_func(self, gPlot,dataPlot,color,ax):
        """
        Visualize a surface plot of the entire value function.
        Inputs:
            gPlot: grid on which value function is parameterized
            dataPlot: Value function data defined as an implicit
            function on the grid.
            color: what color to give the value function
            ax: axis on which to graw the value function. If not given, it
            grabs the current axis from pyplot.
        Output:
            Returns the value function 3D plot.
        """
        if gPlot.dim<2:
            h, = ax.plot(gPlot.xs[0], np.squeeze(dataPlot), color=color, linewidth=2);
        elif gPlot.dim==2:
            h, = ax.plot_surface(gPlot.xs[0], gPlot.xs[1], dataPlot, rstride=1, cstride=1,
                            cmap='viridis', edgecolor='r', facecolor=color)
        else:
            # NOTE(review): `error` is undefined here — should raise ValueError.
            error('Can not plot in more than 3D!')
        return h
    def levelset_viz(self, g, data, title='', fc='c', ec='k'):
        """
        Simultaneously visualize the level sets of a value function
        on a 1X3 chart:
           Chart 131: 2D Value function as a surface mesh
           Chart 132: 2D Value function as colored contour levels
           Chart 133: 2D Value zero - set as cyan contour.
        Author: <NAME>, October 29, 2021
        """
        plt.clf()
        # Rebuild the three panels after clearing the figure.
        self._ax_arr = [plt.subplot(self._gs[i], projection='3d') for i in range(2)] + [plt.subplot(self._gs[2])]
        if g.dim==2:
            self._ax_arr[0].plot_surface(g.xs[0], g.xs[1], data, rstride=1, cstride=1,
                            cmap='viridis', edgecolor=ec, facecolor=fc)
            self._ax_arr[0].set_xlabel('X', fontdict=self._fontdict)
            self._ax_arr[0].set_ylabel('Y', fontdict=self._fontdict)
            self._ax_arr[0].set_zlabel('Z', fontdict=self._fontdict)
            self._ax_arr[0].set_title(f'{title}', fontdict=self._fontdict)
            self._ax_arr[1].contourf(g.xs[0], g.xs[1], data, colors=fc)
            self._ax_arr[1].set_xlabel('X', fontdict=self._fontdict)
            self._ax_arr[1].set_title(f'Contours', fontdict=self._fontdict)
            self._ax_arr[2].contour(g.xs[0], g.xs[1], data, levels=0, colors=fc)
            self._ax_arr[2].set_xlabel('X', fontdict=self._fontdict)
            self._ax_arr[2].set_ylabel('Y', fontdict=self._fontdict)
            self._ax_arr[2].grid('on')
            self._ax_arr[2].set_title(f'2D Zero level set', fontdict=self._fontdict)
        elif g.dim == 3:
            # draw the mesh first # see example in test 3d mesh
            # self._ax_arr[0].plot3D(g.xs[0].flatten(), g.xs[1].flatten(), g.xs[2].flatten(), color='cyan')
            'add the zero level set'
            mesh = implicit_mesh(data, level=0., spacing=tuple(g.dx.flatten().tolist()), edge_color=None, face_color='g')
            self._ax_arr[0].add_collection3d(mesh)
            # self._ax_arr[1].set_xlabel('X', fontdict=self._fontdict)
            # self._ax_arr[1].set_title(f'3D Zero level set', fontdict=self._fontdict)
            self._ax_arr[0].view_init(elev=30., azim=10.)
            xlims, ylims, zlims = self.get_lims(mesh)
            self._ax_arr[0].set_xlim(xlims)
            self._ax_arr[0].set_ylim(ylims)
            self._ax_arr[0].set_zlim(zlims)
            self.set_title(self._ax_arr[1], f'3D Zero level set')
            # 'zero level set with set azimuth and elevation'
            # # mesh = implicit_mesh(data, level=0., spacing=tuple(g.dx.flatten().tolist()), edge_color=None, face_color='g')
            # # project last dim and visu 2D level set
            xs = 'min' # xs = g.min[g.dim-1] + 3/(N+1) * (g.max[g.dim-1] - g.min[g.dim-1])
            g_red, data_red = proj(g, data, [0, 0, 1], xs)
            self._ax_arr[1].plot_surface(g_red.xs[0], g_red.xs[1], data_red, rstride=1, cstride=1,
                            cmap='viridis', edgecolor='k', facecolor='red')
            # self._ax_arr[1].contourf(g_red.xs[0], g_red.xs[1], data_red, colors=next(self.color))
            self.set_title(self._ax_arr[1], f'Value function surface')
            self._ax_arr[1].view_init(elev=30., azim=10.)
            # self._ax_arr[2].contour(g_red.xs[0], g_red.xs[1], data_red, colors=next(self.color))
            self._ax_arr[2].contourf(g_red.xs[0], g_red.xs[1], data_red, colors='blue')
            # # self._ax_arr[2].set_xlabel('X', fontdict=self._fontdict)
            # # self._ax_arr[2].set_title(f'3D Zero level set', fontdict=self._fontdict)
            # self._ax_arr[2].view_init(elev=60., azim=10.)
            self.set_title(self._ax_arr[2], f'Zero level set slice')
        self._fig.tight_layout()
        if self.savedict.save:
            self._fig.savefig(join(self.savedict.savepath,self.savedict.savename),
                            bbox_inches='tight',facecolor='None')
        self.draw()
        time.sleep(self.params.pause_time)
    def draw(self):
        # Flush all panels to screen (interactive mode).
        for ax in self._ax_arr:
            ax.draw_artist(ax)
        self._fig.canvas.draw()
        self._fig.canvas.flush_events()
    def get_lims(self, mesh):
        # Axis-aligned bounding box of the mesh vertices: (min, max) per axis.
        xlim = (mesh.verts[:, 0].min(), mesh.verts[:,0].max())
        ylim = (mesh.verts[:, 1].min(), mesh.verts[:,1].max())
        zlim = (mesh.verts[:, 2].min(), mesh.verts[:,2].max())
        return xlim, ylim, zlim
| __author__ = "<NAME>"
__copyright__ = "2021, Decomposing Level Sets of PDEs"
__credits__ = "<NAME>, <NAME>"
__license__ = "Lekan License"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Completed"
import time, os
import numpy as np
from os.path import join
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
from LevelSetPy.Utilities.matlab_utils import *
from LevelSetPy.Grids.create_grid import createGrid
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from LevelSetPy.Visualization.mesh_implicit import implicit_mesh
from LevelSetPy.ValueFuncs import proj
from LevelSetPy.Visualization.settings import buffered_axis_limits
class ValueVisualizer(object):
def __init__(self, params={}):
"""
Use this class to visualize the starting value function's
zero levelset. If you use a value function different to the
one specified in the cylinder, you may need to specify the axis
limits of the plot manually.
Or send a PR if you can help write a dynamic axes adjuster as the
level sets' limits varies.
Copyright (c) <NAME>. https://scriptedonachip.com
2021.
"""
plt.ion()
if params.winsize:
self.winsize=params.winsize
# create figure 2 for real-time value function updates
self._fig = plt.figure(2, figsize=self.winsize)
# mngr = plt.get_current_fig_manager()
# geom = mngr.window.geometry()
# x,y,dx,dy = geom.getRect()
self._gs = gridspec.GridSpec(1, 3, self._fig)
self._ax_arr = [plt.subplot(self._gs[i], projection='3d') for i in range(2)] + [plt.subplot(self._gs[2])]
self._labelsize = params.labelsize
self._init = False
self.value = params.value if isfield(params, 'value') else False
self._fontdict = params.fontdict
self.pause_time = params.pause_time
self.savedict = params.savedict
self.params = params
self.color = iter(plt.cm.ocean(np.linspace(0, 1, 3)))
self._init = False
if self.savedict.save and not os.path.exists(self.savedict.savepath):
os.makedirs(self.savedict.savepath)
if self._fontdict is None:
self._fontdict = {'fontsize':12, 'fontweight':'bold'}
if params.init_conditions or self.value:
assert isinstance(value, np.ndarray), "value function must be an ndarray."
self.init_projections(value.ndim)
else:
# fig 1 for initial value set
self._fig_init_val = plt.figure(1, figsize=(16, 9))
# # https://stackoverflow.com/questions/7449585/how-do-you-set-the-absolute-position-of-figure-windows-with-matplotlib
# mngr = self._fig_init_val.get_current_fig_manager()
# geom = mngr.window.geometry()
# x,y,dx,dy = geom.getRect()
# mngr.window.setGeometry(100, 200, 640, 580)
self.viz_init_valueset(params, 0)
self._fig_init_val.canvas.draw()
self._fig_init_val.canvas.flush_events()
self._fig.canvas.draw()
self._fig.canvas.flush_events()
def init_projections(self, value_dims, ax_idx = 0):
"""
Initialize plots based off the length of the data array.
"""
if self.params.init_projections:
if value_dims==2:
self._ax_arr[ax_idx] = plt.subplot(self._gs_plots[0])
self._ax_arr[ax_idx].plot(self.value[0], self.value[1], 'b*') #,markersize=15,linewidth=3, label='Target')
elif value_dims==3:
self._ax_arr[ax_idx] = plt.subplot(self._gs_plots[0], projection='3d')
self._ax_arr[ax_idx].plot(self.value[0], self.value[1], self.value[2], 'b*') #,markersize=15,linewidth=3, label='Target')
self._ax_arr[ax_idx].set_title('Initial Projections', fontdict=self._fontdict)
ax_idx += 1
else:
value_data = self.params.value_data
if isfield(self.params, 'grid_bundle') and isbundle(self.params.grid_bundle, self.params):
g = self.params.grid_bundle
else: # create the grid
N = np.asarray(size(value_data)).T
g = createGrid(np.ones(value_data.ndim, 1), N, N)
if g.dim != value_data.ndim and g.dim+1 != value_data.ndim:
raise ValueError('Grid dimension is inconsistent with data dimension!')
if g.dim == value_data.ndim:
self.init_valueset(g, value_data, ax_idx)
self._ax_arr[ax_idx].set_title('Initial Value Set', fontdict=self._fontdict)
self._ax_arr[ax_idx].xaxis.set_tick_params(labelsize=self._labelsize)
self._ax_arr[ax_idx].yaxis.set_tick_params(labelsize=self._labelsize)
self._ax_arr[ax_idx].grid('on')
self._ax_arr[ax_idx].legend(loc='best', fontdict = self._fontdict)
self._init = True
def viz_init_valueset(self, params, ax_idx=0):
g = params.grid_bundle
data = params.value_data
self._init = True
if g.dim<2:
# init_ax = self._fig_init_val.gca(projection='3d')
self._ax_arr[0].plot(g.xs[0], data, linestyle='-', color=next(self.color))
self._ax_arr[0].plot(g.xs[0], np.zeros(size(g.xs[0])), linestyle=':', color='k')
elif g.dim==2:
# init_ax = self._fig_init_val.axes(projection='3d')
self._ax_arr[0].contourf(g.xs[0], g.xs[1], self.value, levels=self.params.level, colors=next(self.color))
self.title(init_ax, title=f'Initial {self.params.level}-Value Set')# init_ax.set_xlabel('X', fontdict=self.fontdict)
elif g.dim == 3:
spacing = tuple(g.dx.flatten().tolist())
mesh = implicit_mesh(data, level=self.params.level, spacing=spacing, edge_color='k', face_color='r')
self.show_3d(g, mesh.mesh, self._ax_arr[0], spacing)
xlim, ylim, zlim = self.get_lims(mesh)
self._ax_arr[0].set_xlim(*xlim)
self._ax_arr[0].set_ylim(*ylim)
self._ax_arr[0].set_zlim(*zlim)
self.set_title(self._ax_arr[0], title=f'Starting {self.params.level}-level Value Set')
elif g.dim == 4:
# This is useful for the temporal-axis and 3 Cartesian Coordinates
'Take 6 slice snapshots and show me the 3D projections'
N=6
gs = gridspec.GridSpec(2, 3, self._fig_init_val)
ax = [plt.subplot(gs[i], projection='3d') for i in range(N)]
for slice_idx in range(N):
ax[slice_idx] = plt.subplot(gs[slice_idx], projection='3d')
xs = g.min[g.dim] + slice_idx/(N+1) * (g.max[g.dim] - g.min[g.dim])
dim = [0, 0, 0, 1]
g3D, mesh3D = proj(g, data, dim, xs)
self.show_3d(g3D, mesh3D, ax[slice_idx], color, spacing)
self.set_title(ax_idx, f"Projected Slice {g.dim} of Initial Value Function Snapshot {slice_idx}.")
if self.savedict.save:
self._fig_init_val.savefig(join(self.savedict.savepath,self.savedict.savename),
bbox_inches='tight',facecolor='None')
def show_3d(self, g, mesh, ax_idx, spacing):
# ax_idx.plot3D(g.xs[0].flatten(), g.xs[1].flatten(), g.xs[2].flatten(), color=next(self.color))
if isinstance(mesh, list):
for m in mesh:
m = implicit_mesh(m, level=self.params.level, spacing=spacing, edge_color='k', face_color='r')
ax_idx.add_collection3d(m)
else:
ax_idx.add_collection3d(mesh)
ax_idx.view_init(elev=30., azim=10.)
def set_title(self, ax, title):
ax.set_title(title)
ax.title.set_fontsize(self._fontdict.fontsize)
ax.title.set_fontweight(self._fontdict.fontweight)
def add_legend(self, linestyle, marker, color, label):
self._ax_legend.plot([], [], linestyle=linestyle, marker=marker,
color=color, label=label)
self._ax_legend.legend(ncol=2, mode='expand', fontsize=10)
def viz_value_func(self, gPlot,dataPlot,color,ax):
"""
Visualize a surface plot of the entire value function.
Inputs:
gPlot: grid on which value function is parameterized
dataPlot: Value function data defined as an implicit
function on the grid.
color: what color to give the value function
ax: axis on which to graw the value function. If not given, it
grabs the current axis from pyplot.
Output:
Returns the value function 3D plot.
"""
if gPlot.dim<2:
h, = ax.plot(gPlot.xs[0], np.squeeze(dataPlot), color=color, linewidth=2);
elif gPlot.dim==2:
h, = ax.plot_surface(gPlot.xs[0], gPlot.xs[1], dataPlot, rstride=1, cstride=1,
cmap='viridis', edgecolor='r', facecolor=color)
else:
error('Can not plot in more than 3D!')
return h
def levelset_viz(self, g, data, title='', fc='c', ec='k'):
"""
Simultaneously visualize the level sets of a value function
on a 1X3 chart:
Chart 131: 2D Value function as a surface mesh
Chart 132: 2D Value function as colored contour levels
Chart 133: 2D Value zero - set as cyan contour.
Author: <NAME>, October 29, 2021
"""
plt.clf()
self._ax_arr = [plt.subplot(self._gs[i], projection='3d') for i in range(2)] + [plt.subplot(self._gs[2])]
if g.dim==2:
self._ax_arr[0].plot_surface(g.xs[0], g.xs[1], data, rstride=1, cstride=1,
cmap='viridis', edgecolor=ec, facecolor=fc)
self._ax_arr[0].set_xlabel('X', fontdict=self._fontdict)
self._ax_arr[0].set_ylabel('Y', fontdict=self._fontdict)
self._ax_arr[0].set_zlabel('Z', fontdict=self._fontdict)
self._ax_arr[0].set_title(f'{title}', fontdict=self._fontdict)
self._ax_arr[1].contourf(g.xs[0], g.xs[1], data, colors=fc)
self._ax_arr[1].set_xlabel('X', fontdict=self._fontdict)
self._ax_arr[1].set_title(f'Contours', fontdict=self._fontdict)
self._ax_arr[2].contour(g.xs[0], g.xs[1], data, levels=0, colors=fc)
self._ax_arr[2].set_xlabel('X', fontdict=self._fontdict)
self._ax_arr[2].set_ylabel('Y', fontdict=self._fontdict)
self._ax_arr[2].grid('on')
self._ax_arr[2].set_title(f'2D Zero level set', fontdict=self._fontdict)
elif g.dim == 3:
# draw the mesh first # see example in test 3d mesh
# self._ax_arr[0].plot3D(g.xs[0].flatten(), g.xs[1].flatten(), g.xs[2].flatten(), color='cyan')
'add the zero level set'
mesh = implicit_mesh(data, level=0., spacing=tuple(g.dx.flatten().tolist()), edge_color=None, face_color='g')
self._ax_arr[0].add_collection3d(mesh)
# self._ax_arr[1].set_xlabel('X', fontdict=self._fontdict)
# self._ax_arr[1].set_title(f'3D Zero level set', fontdict=self._fontdict)
self._ax_arr[0].view_init(elev=30., azim=10.)
xlims, ylims, zlims = self.get_lims(mesh)
self._ax_arr[0].set_xlim(xlims)
self._ax_arr[0].set_ylim(ylims)
self._ax_arr[0].set_zlim(zlims)
self.set_title(self._ax_arr[1], f'3D Zero level set')
# 'zero level set with set azimuth and elevation'
# # mesh = implicit_mesh(data, level=0., spacing=tuple(g.dx.flatten().tolist()), edge_color=None, face_color='g')
# # project last dim and visu 2D level set
xs = 'min' # xs = g.min[g.dim-1] + 3/(N+1) * (g.max[g.dim-1] - g.min[g.dim-1])
g_red, data_red = proj(g, data, [0, 0, 1], xs)
self._ax_arr[1].plot_surface(g_red.xs[0], g_red.xs[1], data_red, rstride=1, cstride=1,
cmap='viridis', edgecolor='k', facecolor='red')
# self._ax_arr[1].contourf(g_red.xs[0], g_red.xs[1], data_red, colors=next(self.color))
self.set_title(self._ax_arr[1], f'Value function surface')
self._ax_arr[1].view_init(elev=30., azim=10.)
# self._ax_arr[2].contour(g_red.xs[0], g_red.xs[1], data_red, colors=next(self.color))
self._ax_arr[2].contourf(g_red.xs[0], g_red.xs[1], data_red, colors='blue')
# # self._ax_arr[2].set_xlabel('X', fontdict=self._fontdict)
# # self._ax_arr[2].set_title(f'3D Zero level set', fontdict=self._fontdict)
# self._ax_arr[2].view_init(elev=60., azim=10.)
self.set_title(self._ax_arr[2], f'Zero level set slice')
self._fig.tight_layout()
if self.savedict.save:
self._fig.savefig(join(self.savedict.savepath,self.savedict.savename),
bbox_inches='tight',facecolor='None')
self.draw()
time.sleep(self.params.pause_time)
def draw(self):
for ax in self._ax_arr:
ax.draw_artist(ax)
self._fig.canvas.draw()
self._fig.canvas.flush_events()
def get_lims(self, mesh):
xlim = (mesh.verts[:, 0].min(), mesh.verts[:,0].max())
ylim = (mesh.verts[:, 1].min(), mesh.verts[:,1].max())
zlim = (mesh.verts[:, 2].min(), mesh.verts[:,2].max())
return xlim, ylim, zlim | en | 0.415572 | Use this class to visualize the starting value function's
zero levelset. If you use a value function different to the
one specified in the cylinder, you may need to specify the axis
limits of the plot manually.
Or send a PR if you can help write a dynamic axes adjuster as the
level sets' limits varies.
Copyright (c) <NAME>. https://scriptedonachip.com
2021. # create figure 2 for real-time value function updates # mngr = plt.get_current_fig_manager() # geom = mngr.window.geometry() # x,y,dx,dy = geom.getRect() # fig 1 for initial value set # # https://stackoverflow.com/questions/7449585/how-do-you-set-the-absolute-position-of-figure-windows-with-matplotlib # mngr = self._fig_init_val.get_current_fig_manager() # geom = mngr.window.geometry() # x,y,dx,dy = geom.getRect() # mngr.window.setGeometry(100, 200, 640, 580) Initialize plots based off the length of the data array. #,markersize=15,linewidth=3, label='Target') #,markersize=15,linewidth=3, label='Target') # create the grid # init_ax = self._fig_init_val.gca(projection='3d') # init_ax = self._fig_init_val.axes(projection='3d') # init_ax.set_xlabel('X', fontdict=self.fontdict) # This is useful for the temporal-axis and 3 Cartesian Coordinates # ax_idx.plot3D(g.xs[0].flatten(), g.xs[1].flatten(), g.xs[2].flatten(), color=next(self.color)) Visualize a surface plot of the entire value function.
Inputs:
gPlot: grid on which value function is parameterized
dataPlot: Value function data defined as an implicit
function on the grid.
color: what color to give the value function
ax: axis on which to graw the value function. If not given, it
grabs the current axis from pyplot.
Output:
Returns the value function 3D plot. Simultaneously visualize the level sets of a value function
on a 1X3 chart:
Chart 131: 2D Value function as a surface mesh
Chart 132: 2D Value function as colored contour levels
Chart 133: 2D Value zero - set as cyan contour.
Author: <NAME>, October 29, 2021 # draw the mesh first # see example in test 3d mesh # self._ax_arr[0].plot3D(g.xs[0].flatten(), g.xs[1].flatten(), g.xs[2].flatten(), color='cyan') # self._ax_arr[1].set_xlabel('X', fontdict=self._fontdict) # self._ax_arr[1].set_title(f'3D Zero level set', fontdict=self._fontdict) # 'zero level set with set azimuth and elevation' # # mesh = implicit_mesh(data, level=0., spacing=tuple(g.dx.flatten().tolist()), edge_color=None, face_color='g') # # project last dim and visu 2D level set # xs = g.min[g.dim-1] + 3/(N+1) * (g.max[g.dim-1] - g.min[g.dim-1]) # self._ax_arr[1].contourf(g_red.xs[0], g_red.xs[1], data_red, colors=next(self.color)) # self._ax_arr[2].contour(g_red.xs[0], g_red.xs[1], data_red, colors=next(self.color)) # # self._ax_arr[2].set_xlabel('X', fontdict=self._fontdict) # # self._ax_arr[2].set_title(f'3D Zero level set', fontdict=self._fontdict) # self._ax_arr[2].view_init(elev=60., azim=10.) | 2.559473 | 3 |
genoome/twentythree/urls.py | jiivan/genoomy | 0 | 6619988 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
# URL routes for the twentythree (23andMe integration) app.
urlpatterns = [
    url(r'^login/$', views.login23, name='login'),
    url(r'^comeback/$', views.comeback, name='comeback'),  # OAuth callback
    url(r'^profiles/$', views.profiles, name='profiles'),
    url(r'^status/$', views.status, name='status'),
]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^login/$', views.login23, name='login'),
url(r'^comeback/$', views.comeback, name='comeback'),
url(r'^profiles/$', views.profiles, name='profiles'),
url(r'^status/$', views.status, name='status'),
] | en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.562382 | 2 |
helpers.py | AuxiliumCDNG/MET-Bot | 0 | 6619989 | import functools
import discord.errors as derrors
from flask import abort
from globals import discord, client, connection_pool
def change_setting(setting, value):
    """Insert or update one key/value row in the ``settings`` table.

    Returns True unconditionally (callers only check truthiness).

    SECURITY FIX: the original interpolated *setting* and *value* directly
    into the SQL text with ``%`` string formatting, allowing SQL injection
    and breaking on values containing quotes.  Use driver-side parameter
    binding instead; result semantics are unchanged for benign inputs.
    """
    with connection_pool.connection() as con, con.cursor(dictionary=True) as cursor:
        cursor.execute("SELECT * FROM settings WHERE setting=%s", (str(setting),))
        if len(cursor.fetchall()) > 0:
            # Key already present — overwrite its value.
            cursor.execute(
                "UPDATE settings SET value=%s WHERE setting=%s",
                (str(value), str(setting)),
            )
        else:
            cursor.execute(
                "INSERT INTO settings (setting, value) VALUES (%s, %s)",
                (str(setting), str(value)),
            )
        con.commit()
        cursor.close()
    return True
def get_setting(setting):
    """Return the stored value for *setting* as a string, or None if absent.

    SECURITY FIX: the original interpolated the key directly into the SQL
    string (injection-prone and broken for keys containing quotes); use a
    parameterized query instead.  Lookup semantics are unchanged.
    """
    with connection_pool.connection() as con, con.cursor(dictionary=True) as cursor:
        cursor.execute("SELECT * FROM settings WHERE setting=%s", (str(setting),))
        res = cursor.fetchone()
        cursor.close()
    if res is not None:
        return str(res["value"])
    # No row for this key.
    return None
def role_checker(role_name):
    """Decorator factory: allow a Flask view only for Discord users holding
    the role stored under *role_name* in the ``settings`` table.

    The stored value is expected to be a role mention like ``<@&12345>``;
    the id is extracted by slicing off ``<@&`` and ``>``.
    NOTE(review): if the setting is missing, ``get_setting`` returns None and
    the slice raises TypeError — confirm the setting is always populated.
    """
    def decorator(view):
        @functools.wraps(view)
        def wrapper(*args, **kwargs):
            # Strip the "<@&" prefix and ">" suffix of the role mention.
            search_role = int(get_setting(role_name)[3:-1])
            authorized = False
            user = client.get_user(discord.user_id)
            if user is None:
                # Cache miss: force a fetch, then retry the cache lookup.
                discord.fetch_user()
                user = client.get_user(discord.user_id)  # NEEDS TO CHANGE WITH THE ABOVE!
            # Scan every guild shared with the bot for the required role.
            for guild in user.mutual_guilds:
                try:
                    guild = client.get_guild(guild.id)
                except derrors.Forbidden:
                    # Bot lacks access to this guild; skip it.
                    continue
                member = guild.get_member(user.id)
                roles = [role.id for role in member.roles]
                if search_role in roles:
                    authorized = True
                    break
            if not authorized:
                # 401 for users without the required role anywhere.
                return abort(401)
            return view(*args, **kwargs)
        return wrapper
    return decorator
def roles_getter():
    """Decorator factory: resolve the signed-in user's mapped role names.

    For every Discord role the user holds in any guild shared with the bot,
    look up a ``settings`` row whose value is the role mention ``<@&id>``
    and pass the matching setting names to the wrapped view as ``roles=``.

    SECURITY FIX: the original built the SQL with an f-string; use a
    parameterized query instead (same results for well-formed role ids).
    """
    def decorator(view):
        @functools.wraps(view)
        def wrapper(*args, **kwargs):
            user = client.get_user(discord.user_id)
            if user is None:
                # Cache miss: force a fetch, then retry the cache lookup.
                discord.fetch_user()
                user = client.get_user(discord.user_id)  # NEEDS TO CHANGE WITH THE ABOVE!
            role_names = []
            for guild in user.mutual_guilds:
                try:
                    guild = client.get_guild(guild.id)
                except derrors.Forbidden:
                    # Bot lacks access to this guild; skip it.
                    continue
                member = guild.get_member(user.id)
                roles = [role.id for role in member.roles]
                with connection_pool.connection() as con, con.cursor(dictionary=True) as cursor:
                    for role in roles:
                        cursor.execute(
                            "SELECT `setting` FROM `settings` WHERE `value`=%s",
                            (f"<@&{role}>",),
                        )
                        res = cursor.fetchone()
                        if res is not None:
                            role_names.append(res["setting"])
            return view(roles=role_names, *args, **kwargs)
        return wrapper
    return decorator
| import functools
import discord.errors as derrors
from flask import abort
from globals import discord, client, connection_pool
def change_setting(setting, value):
with connection_pool.connection() as con, con.cursor(dictionary=True) as cursor:
cursor.execute("SELECT * FROM settings WHERE setting='%s'" % (str(setting)))
if len(cursor.fetchall()) > 0:
cursor.execute("UPDATE settings SET value='%s' WHERE setting='%s'" % (str(value), str(setting)))
else:
cursor.execute("INSERT INTO settings (setting, value) VALUES ('%s', '%s')" % (str(setting), str(value)))
con.commit()
cursor.close()
return True
def get_setting(setting):
with connection_pool.connection() as con, con.cursor(dictionary=True) as cursor:
cursor.execute("SELECT * FROM settings WHERE setting='%s'" % (str(setting)))
res = cursor.fetchone()
cursor.close()
if res is not None:
return str(res["value"])
else:
return None
def role_checker(role_name):
def decorator(view):
@functools.wraps(view)
def wrapper(*args, **kwargs):
search_role = int(get_setting(role_name)[3:-1])
authorized = False
user = client.get_user(discord.user_id)
if user is None:
discord.fetch_user()
user = client.get_user(discord.user_id) # NEEDS TO CHANGE WITH THE ABOVE!
for guild in user.mutual_guilds:
try:
guild = client.get_guild(guild.id)
except derrors.Forbidden:
continue
member = guild.get_member(user.id)
roles = [role.id for role in member.roles]
if search_role in roles:
authorized = True
break
if not authorized:
return abort(401)
return view(*args, **kwargs)
return wrapper
return decorator
def roles_getter():
def decorator(view):
@functools.wraps(view)
def wrapper(*args, **kwargs):
user = client.get_user(discord.user_id)
if user is None:
discord.fetch_user()
user = client.get_user(discord.user_id) # NEEDS TO CHANGE WITH THE ABOVE!
role_names = []
for guild in user.mutual_guilds:
try:
guild = client.get_guild(guild.id)
except derrors.Forbidden:
continue
member = guild.get_member(user.id)
roles = [role.id for role in member.roles]
with connection_pool.connection() as con, con.cursor(dictionary=True) as cursor:
for role in roles:
cursor.execute(f"SELECT `setting` FROM `settings` WHERE `value`='<@&{role}>'")
res = cursor.fetchone()
if res is not None:
role_names.append(res["setting"])
return view(roles=role_names, *args, **kwargs)
return wrapper
return decorator
| en | 0.58842 | # NEEDS TO CHANGE WITH THE ABOVE! # NEEDS TO CHANGE WITH THE ABOVE! | 2.220993 | 2 |
server.py | zhangyi921/CMPUT404-assignment-webserver | 0 | 6619990 | # coding: utf-8
import socketserver
# Copyright 2013 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
class MyWebServer(socketserver.BaseRequestHandler):
    """Minimal static-file HTTP handler serving files from ./www.

    Supports only GET; responds 405 to other methods, 301 for directory
    paths missing the trailing slash, and 404 (with www/404.html) for
    anything that cannot be opened.
    """

    def handle(self):
        # Read one request chunk; assumes the whole request fits in 1024
        # bytes — TODO confirm acceptable for this assignment's clients.
        self.data = self.request.recv(1024).strip()
        print ("Got a request of: %s\n" % self.data)
        method = self.data.decode().split()[0]
        # Reject anything other than GET.
        if method != 'GET':
            self.request.sendall(bytearray(response_405, 'utf-8'))
            return
        path = self.data.decode().split()[1]
        # Determine the Content-Type from the path (css vs html only).
        if 'css' in path:
            Format = 'css'
        else:
            Format = 'html'
        try:
            # Safety check: reject traversal attempts where ".." segments
            # outnumber real segments (would escape the www/ root).
            paths = path.split('/')
            if '..' in path:
                safe = 0
                for i in paths:
                    if i == '..':
                        safe -= 1
                    else:
                        safe += 1
                if safe < 0:
                    raise ValueError('Not Safe!')
            # Decide what file to serve based on the last path segment.
            if '.' in paths[-1]:
                # Request for a specific file.
                path = 'www' + path
                response = open(path, 'rb')
            elif path[-1] == '/':
                # Directory request — serve its index file.
                path = 'www' + path + 'index.html'
                response = open(path, 'rb')
            else:
                # Directory without trailing slash — 301 redirect.
                path = path + '/'
                try:
                    # Check that the redirect target exists in the server directory.
                    f = open('www'+path+'index.html')
                    f.close()
                    self.request.sendall(bytearray(moved_away_response.format(PATH=path), 'utf-8'))
                    return
                except:
                    # Redirect target does not exist — fall through to 404.
                    path = 'not found'
                    raise ValueError('Not Found')
        except:
            # Requested file does not exist (or path was unsafe): serve 404 page.
            print('404')
            path = 'www/404.html'
            response = open(path, 'rb')
            self.request.sendall(bytearray(response_404.format(FORMAT='html'), 'utf-8'))
            self.request.sendfile(response)
            response.close()
            return
        # Success: send status/headers, then stream the file body.
        self.request.sendall(bytearray(ok_response.format(FORMAT=Format), 'utf-8'))
        self.request.sendfile(response)
        response.close()
if __name__ == "__main__":
    HOST, PORT = "localhost", 8080
    # HTTP response templates.  NOTE(review): these headers are not
    # terminated by a blank line ("\r\n\r\n"); many clients tolerate it,
    # but it is not strictly valid HTTP — confirm before reuse.
    moved_away_response = """HTTP/1.1 301 Permanently moved to {PATH}
Location: {PATH}
"""
    response_405 = """HTTP/1.1 405 Method Not Allowed
Server: Yi's server
"""
    response_404 = """HTTP/1.1 404 Not Found
Server: Yi's server
Content-Type: text/{FORMAT}
"""
    ok_response = """HTTP/1.1 200 OK
Server: Yi's server
Content-Type: text/{FORMAT}
"""
    # Allow quick restarts without waiting for TIME_WAIT sockets.
    socketserver.TCPServer.allow_reuse_address = True
    # Create the server, binding to localhost on port 8080
    server = socketserver.TCPServer((HOST, PORT), MyWebServer)
    # Activate the server; this will keep running until you
    # interrupt the program with Ctrl-C
    server.serve_forever()
| # coding: utf-8
import socketserver
# Copyright 2013 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
print ("Got a request of: %s\n" % self.data)
method = self.data.decode().split()[0]
# method not allowed
if method != 'GET':
self.request.sendall(bytearray(response_405, 'utf-8'))
return
path = self.data.decode().split()[1]
# determin the file type
if 'css' in path:
Format = 'css'
else:
Format = 'html'
try:
#safety check
paths = path.split('/')
if '..' in path:
safe = 0
for i in paths:
if i == '..':
safe -= 1
else:
safe += 1
if safe < 0:
raise ValueError('Not Safe!')
# this handles if the requested file exists
if '.' in paths[-1]:
# request for specific file
path = 'www' + path
response = open(path, 'rb')
elif path[-1] == '/':
# request for the index file under that folder
path = 'www' + path + 'index.html'
response = open(path, 'rb')
else:
# redirect
path = path + '/'
try:
# check if redirected link exits in server directory
f = open('www'+path+'index.html')
f.close()
self.request.sendall(bytearray(moved_away_response.format(PATH=path), 'utf-8'))
return
except:
# redirected link does not exit
path = 'not found'
raise ValueError('Not Found')
except:
# requested fiel does not exist
print('404')
path = 'www/404.html'
response = open(path, 'rb')
self.request.sendall(bytearray(response_404.format(FORMAT='html'), 'utf-8'))
self.request.sendfile(response)
response.close()
return
self.request.sendall(bytearray(ok_response.format(FORMAT=Format), 'utf-8'))
self.request.sendfile(response)
response.close()
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
moved_away_response = """HTTP/1.1 301 Permanently moved to {PATH}
Location: {PATH}
"""
response_405 = """HTTP/1.1 405 Method Not Allowed
Server: Yi's server
"""
response_404 = """HTTP/1.1 404 Not Found
Server: Yi's server
Content-Type: text/{FORMAT}
"""
ok_response = """HTTP/1.1 200 OK
Server: Yi's server
Content-Type: text/{FORMAT}
"""
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
| en | 0.786289 | # coding: utf-8 # Copyright 2013 <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # Furthermore it is derived from the Python documentation examples thus # some of the code is Copyright © 2001-2013 Python Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ # method not allowed # determin the file type #safety check # this handles if the requested file exists # request for specific file # request for the index file under that folder # redirect # check if redirected link exits in server directory # redirected link does not exit # requested fiel does not exist HTTP/1.1 301 Permanently moved to {PATH} Location: {PATH} HTTP/1.1 405 Method Not Allowed Server: Yi's server HTTP/1.1 404 Not Found Server: Yi's server Content-Type: text/{FORMAT} HTTP/1.1 200 OK Server: Yi's server Content-Type: text/{FORMAT} # Create the server, binding to localhost on port 8080 # Activate the server; this will keep running until you # interrupt the program with Ctrl-C | 3.042774 | 3 |
orochi/ya/apps.py | garanews/orochi | 121 | 6619991 | from django.apps import AppConfig
class YaConfig(AppConfig):
    """Django AppConfig for the ``orochi.ya`` application."""
    name = "orochi.ya"
| from django.apps import AppConfig
class YaConfig(AppConfig):
    """Django AppConfig for the ``orochi.ya`` application."""
    name = "orochi.ya"
| none | 1 | 1.087693 | 1 | |
server.py | davidaik/item-catalog | 0 | 6619992 | #!/system/bin/env python3
from flask import Flask, request, redirect, url_for, render_template
import html
import calendar
import db_utils
import response
from flask import session as login_session
from flask import jsonify
import random
import string
from google.oauth2 import id_token
from google.auth.transport import requests
import json
import auth
# Flask application instance for the item-catalog site.
app = Flask(__name__)
# Google OAuth2 client id used by the Sign-In flow.
# The Google API client ID below is broken
# into multiple lines for pep8 compliance
CLIENT_ID = '{}{}'.format(
    '692318378170-ufp0veeknbkbbu24er6h2g3n11c4govm',
    '.apps.googleusercontent.com'
)
@app.route('/')
@app.route('/category/<int:category_id>', endpoint='category_page')
def get_index(category_id=0):
    """Render the home page: latest items, or one category's items.

    category_id 0 (the default, reached via '/') means "all categories".
    """
    categories = db_utils.get_categories()
    items = db_utils.get_items(category_id)
    page_title = 'Latest Items'
    if category_id != 0:
        category = db_utils.get_category(category_id)
        page_title = category.name
    # Attach a human-readable creation date for the template.
    for item in items:
        item.nice_date = '{month} {day}, {year}'.format(
            month=calendar.month_name[item.created_at.month],
            day=item.created_at.day,
            year=item.created_at.year)
    signed_in = auth.is_signed_in()
    is_user_admin = False
    if signed_in:
        is_user_admin = auth.is_user_admin()
    return render_template(
        'index.html',
        categories=categories,
        items=items,
        page_title=page_title,
        CLIENT_ID=CLIENT_ID,
        signed_in=signed_in,
        is_user_admin=is_user_admin,
        user_name=auth.get_user_name(),
        picture=auth.get_user_picture(),
        SIGNIN_REQUEST_TOKEN=auth.get_signin_request_token()
    )
@app.route('/category/new', methods=['GET', 'POST'])
@app.route('/category/<int:id>/edit', methods=['GET', 'POST'])
def get_edit_category_page(id=0):
    """Render (GET) or process (POST) the add/edit-category form.

    Only admins may create or edit categories.  POST is reached via AJAX
    and returns a JSON response consumed by the client-side JS.

    BUG FIX: when either POST field was empty the original fell through
    and returned None (HTTP 500); now returns an explicit JSON error,
    mirroring get_edit_item_page.
    """
    if request.method == 'GET':
        if not auth.is_user_admin():
            # Only admins can add and edit categories
            return render_template('unauthorized.html')
        if id and id != 0:
            # id is specified, render edit category page
            category = db_utils.get_category(id)
            return render_template(
                'edit-category.html',
                category=category,
                CLIENT_ID=CLIENT_ID,
                signed_in=auth.is_signed_in(),
                picture=login_session.get('picture')
            )
        else:
            # No id: render the blank "new category" form.
            return render_template(
                'edit-category.html',
                CLIENT_ID=CLIENT_ID,
                signed_in=auth.is_signed_in(),
                picture=login_session.get('picture')
            )
    elif request.method == 'POST':
        # Reached from an AJAX request; return JSON for the client JS.
        if not auth.is_user_admin():
            return response.error('Unauthorized')
        if request.form['name'] and request.form['desc']:
            if id and id != 0:
                # id is specified, update existing category
                category = db_utils.update_category(
                    id, request.form['name'], request.form['desc'])
                categoryData = {'id': category.id,
                                'name': category.name, 'desc': category.desc}
                return response.success(url_for('get_index'), categoryData)
            else:
                category = db_utils.add_category(
                    request.form['name'], request.form['desc'])
                categoryData = {'id': category.id,
                                'name': category.name, 'desc': category.desc}
                return response.success(url_for('get_index'), categoryData)
        else:
            # Previously fell through returning None -> HTTP 500.
            return response.error('Failed to save')
@app.route('/delete/category/<int:id>', methods=['POST'])
def post_delete_category(id):
    """AJAX endpoint: delete a category; reply with a JSON status payload.

    Admin-only — non-admins receive a JSON 'Unauthorized' error.
    """
    if auth.is_user_admin():
        db_utils.delete_category(id)
        return response.success()
    return response.error('Unauthorized')
@app.route(
    '/item/new',
    methods=['GET', 'POST'],
    endpoint='new_item')
@app.route(
    '/item/<int:id>/edit',
    methods=['GET', 'POST'],
    endpoint='edit_item'
)
def get_edit_item_page(id=0):
    """Render (GET) or process (POST) the add/edit-item form.

    id == 0 (default) means "create new item".  Editing requires the
    caller to be the item's owner or an admin; anonymous users are
    redirected to the login page with a ?redirect= return URL.
    POST is reached via AJAX and returns a JSON payload.
    """
    if request.method == 'GET':
        if not auth.is_signed_in():
            # Redirect to login page.
            # The url to which we are redirected will contain a parameter
            # which will be the url to redirect back to
            # after logging in
            redirect_parameter = None
            if id and id != 0:
                redirect_parameter = 'redirect={}'.format(
                    url_for('edit_item', id=id))
            else:
                redirect_parameter = 'redirect={}'.format(url_for('new_item'))
            url = '{path}?{parameter}'.format(
                path=url_for('get_login_page'),
                parameter=redirect_parameter
            )
            return redirect(url, 302)
        categories = db_utils.get_categories()
        item = None
        if id and id != 0:
            item = db_utils.get_item(id)
            if item is None:
                return render_template('404.html')
            else:
                if (not auth.is_user_admin() and
                        item.user_id != auth.get_user_id()):
                    # Cannot edit item that does not belong to user
                    # But admins are allowed
                    return render_template('unauthorized.html')
        return render_template(
            'edit-item.html',
            item=item,
            categories=categories,
            CLIENT_ID=CLIENT_ID,
            signed_in=auth.is_signed_in(),
            user_name=auth.get_user_name(),
            picture=login_session.get('picture')
        )
    elif request.method == 'POST':
        # This is meant to be reached from AJAX request.
        # We return a JSON response that will be used by
        # The JS code making the request.
        if not auth.is_signed_in():
            return response.error('Unauthorized')
        if id and id != 0:
            # Update item
            item = db_utils.get_item(id)
            if (not auth.is_user_admin() and
                    item.user_id != auth.get_user_id()):
                # Only item owners and admins allowed to update item
                return response.error('Unauthorized')
            if (request.form['name'] and
                    request.form['desc'] and
                    request.form['cat-id']):
                item = db_utils.update_item(
                    request.form['item-id'],
                    request.form['name'],
                    request.form['desc'],
                    request.form['cat-id']
                )
                itemData = {
                    'id': item.id,
                    'name': item.name,
                    'desc': item.desc,
                    'short_desc': item.short_desc,
                    'category_id': item.category_id
                }
                return response.success(
                    url_for(
                        'get_item_page',
                        id=itemData['id']
                    ),
                    itemData
                )
            else:
                # Missing form fields -> validation failure.
                return response.error('Failed to save')
        else:
            # Create new item
            if (request.form['name'] and
                    request.form['desc'] and
                    request.form['cat-id']):
                item = db_utils.add_item(
                    request.form['name'],
                    request.form['desc'],
                    request.form['cat-id'],
                    auth.get_user_id()
                )
                itemData = {
                    'id': item.id,
                    'name': item.name,
                    'desc': item.desc,
                    'short_desc': item.short_desc,
                    'category_id': item.category_id
                }
                return response.success(
                    url_for(
                        'get_item_page',
                        id=itemData['id']
                    ),
                    itemData
                )
            else:
                # Missing form fields -> validation failure.
                return response.error('Failed to save')
@app.route('/item/<int:id>', methods=['GET'])
def get_item_page(id):
    """Render one item's detail page, plus the sidebar of 5 recent items."""
    categories = db_utils.get_categories()
    item = db_utils.get_item(id)
    recent_items = db_utils.get_recent_items(5)
    if item is None:
        return render_template('404.html')
    # Human-readable creation date for the template.
    item.nice_date = '{month} {day}, {year}'.format(
        month=calendar.month_name[
            item.created_at.month
        ],
        day=item.created_at.day,
        year=item.created_at.year
    )
    signed_in = auth.is_signed_in()
    is_user_admin = False
    is_item_owner = False
    if signed_in:
        is_user_admin = auth.is_user_admin()
        is_item_owner = item.user_id == auth.get_user_id()
    return render_template(
        'item.html',
        id=id,
        categories=categories,
        item=item,
        recent_items=recent_items,
        CLIENT_ID=CLIENT_ID,
        signed_in=signed_in,
        is_user_admin=is_user_admin,
        is_item_owner=is_item_owner,
        user_name=auth.get_user_name(),
        picture=login_session.get('picture'),
        SIGNIN_REQUEST_TOKEN=auth.get_signin_request_token()
    )
@app.route('/myitems')
@app.route('/user/<string:user_id>')
def get_my_items_page(user_id=0):
    """List items belonging to the signed-in user (/myitems) or to a
    specific user (/user/<id>)."""
    if user_id == 0 and not auth.is_signed_in():
        # This would be reached when /myitems is requested.
        # Redirect to login page.
        # The url to which we are redirected will contain a parameter
        # which will be the url to redirect back to
        # after logging in
        redirect_parameter = 'redirect={}'.format(url_for('get_my_items_page'))
        url = '{path}?{parameter}'.format(
            path=url_for('get_login_page'),
            parameter=redirect_parameter
        )
        return redirect(url, 302)
    page_title = 'My Items'
    if user_id != 0:
        user = db_utils.get_user(user_id)
        page_title = 'Items by {}'.format(user.name)
    categories = db_utils.get_categories()
    items = db_utils.get_user_items(user_id if user_id else auth.get_user_id())
    # Human-readable creation dates for the template.
    for item in items:
        item.nice_date = '{month} {day}, {year}'.format(
            month=calendar.month_name[item.created_at.month],
            day=item.created_at.day,
            year=item.created_at.year
        )
    signed_in = auth.is_signed_in()
    is_user_admin = False
    if signed_in:
        is_user_admin = auth.is_user_admin()
    return render_template(
        'index.html',
        page_title=page_title,
        categories=categories,
        items=items,
        CLIENT_ID=CLIENT_ID,
        signed_in=signed_in,
        is_user_admin=is_user_admin,
        user_name=auth.get_user_name(),
        picture=auth.get_user_picture(),
        SIGNIN_REQUEST_TOKEN=auth.get_signin_request_token()
    )
@app.route('/delete/item/<int:id>', methods=['POST'])
def post_delete_item(id):
    """AJAX endpoint: delete an item; reply with a JSON status payload.

    SECURITY FIX: the original performed no authorization check at all,
    so any visitor could delete any item.  Apply the same rules the
    edit-item endpoint enforces: only the item's owner or an admin may
    delete it.  Also handle an unknown id cleanly instead of crashing.
    """
    if not auth.is_signed_in():
        return response.error('Unauthorized')
    item = db_utils.get_item(id)
    if item is None:
        return response.error('Item not found.')
    if not auth.is_user_admin() and item.user_id != auth.get_user_id():
        return response.error('Unauthorized')
    db_utils.delete_item(item)
    return response.success()
@app.route('/login')
def get_login_page():
    # Standalone login page with the Google Sign-In client id and a
    # per-session anti-forgery token.
    return render_template(
        'login.html',
        CLIENT_ID=CLIENT_ID,
        SIGNIN_REQUEST_TOKEN=auth.get_signin_request_token())
@app.route('/signin', methods=['POST'])
def do_sign_in():
    """AJAX endpoint: verify a Google Sign-In ID token and start a session.

    Validates an anti-forgery token, verifies the Google ID token
    (signature, issuer, audience), creates the user row on first sign-in,
    and stores identity details in the Flask session.  Returns JSON.
    """
    # Anti-forgery check: the token posted by the page must match the one
    # issued for this session.
    if (request.form['signin_request_token'] !=
            login_session['signin_request_token']):
        return response.error('Invalid token.')
    g_id_token = request.form['id_token']
    try:
        # Verify signature and audience of the Google ID token.
        idinfo = id_token.verify_oauth2_token(
            g_id_token, requests.Request(), CLIENT_ID)
        if (idinfo['iss'] not in
                ['accounts.google.com', 'https://accounts.google.com']):
            raise ValueError('Wrong issuer.')
        if idinfo['aud'] != CLIENT_ID:
            raise ValueError('Invalid client id.')
    except ValueError:
        return response.error('Could not sign in')
    # 'sub' is Google's stable unique identifier for the account.
    user_id = idinfo['sub']
    stored_id_token = login_session.get('id_token')
    stored_user_id = login_session.get('user_id')
    user = db_utils.get_user(user_id)
    if user is None:
        # Add user to database if id does not exist.
        db_utils.add_user(user_id, idinfo['email'], idinfo['name'])
    if stored_id_token is not None and stored_user_id == user_id:
        # Already signed in as this user — nothing more to do.
        return response.success()
    # Store the access token in the session for later use.
    login_session['id_token'] = g_id_token
    login_session['user_id'] = user_id
    login_session['name'] = idinfo['name']
    login_session['email'] = idinfo['email']
    login_session['picture'] = idinfo['picture']
    return response.success()
@app.route('/signout', methods=['POST'])
def do_sign_out():
    """AJAX endpoint: drop every key from the session, signing the user out."""
    for stored_key in list(login_session.keys()):
        login_session.pop(stored_key)
    return response.success()
# JSON endpoints
@app.route('/items.json')
def get_all_items_json():
    """JSON endpoint: every catalog item as a flat array of dicts."""
    all_items = db_utils.get_items(0)
    item_list = [
        {
            'id': entry.id,
            'name': entry.name,
            'created_at': entry.created_at,
            'updated_at': entry.updated_at,
            'category_id': entry.category_id,
            'category_name': entry.category.name,
            'user_id': entry.user_id,
            'short_desc': entry.short_desc,
            'desc': entry.desc
        }
        for entry in all_items
    ]
    return jsonify(item_list), 200
@app.route('/item.json')
def get_item_json():
    """JSON endpoint: one item, selected by the ``id`` query parameter.

    BUG FIX: for an unknown id the original dereferenced ``item.id`` on
    None, raising AttributeError (HTTP 500); return a clean JSON error
    instead, matching the missing-parameter case.
    """
    id = request.args.get('id')
    if not id:
        return response.error('Item id not specified.')
    item = db_utils.get_item(id)
    if item is None:
        return response.error('Item not found.')
    item_dict = {
        'id': item.id,
        'name': item.name,
        'created_at': item.created_at,
        'updated_at': item.updated_at,
        'category_id': item.category_id,
        'category_name': item.category.name,
        'user_id': item.user_id,
        'short_desc': item.short_desc,
        'desc': item.desc
    }
    return jsonify(item_dict), 200
if __name__ == '__main__':
    # NOTE(review): '<KEY>' is a scrubbed placeholder — supply a real random
    # secret (e.g. from an environment variable) before deployment.
    app.secret_key = '<KEY>'
    # Debug mode must be disabled in production.
    app.debug = True
    app.run(host='0.0.0.0', port=8000, threaded=False)
| #!/system/bin/env python3
from flask import Flask, request, redirect, url_for, render_template
import html
import calendar
import db_utils
import response
from flask import session as login_session
from flask import jsonify
import random
import string
from google.oauth2 import id_token
from google.auth.transport import requests
import json
import auth
app = Flask(__name__)
# The Google API client ID below is broken
# into multiple lines for pep8 compliance
CLIENT_ID = '{}{}'.format(
'692318378170-ufp0veeknbkbbu24er6h2g3n11c4govm',
'.apps.googleusercontent.com'
)
@app.route('/')
@app.route('/category/<int:category_id>', endpoint='category_page')
def get_index(category_id=0):
categories = db_utils.get_categories()
items = db_utils.get_items(category_id)
page_title = 'Latest Items'
if category_id != 0:
category = db_utils.get_category(category_id)
page_title = category.name
for item in items:
item.nice_date = '{month} {day}, {year}'.format(
month=calendar.month_name[item.created_at.month],
day=item.created_at.day,
year=item.created_at.year)
signed_in = auth.is_signed_in()
is_user_admin = False
if signed_in:
is_user_admin = auth.is_user_admin()
return render_template(
'index.html',
categories=categories,
items=items,
page_title=page_title,
CLIENT_ID=CLIENT_ID,
signed_in=signed_in,
is_user_admin=is_user_admin,
user_name=auth.get_user_name(),
picture=auth.get_user_picture(),
SIGNIN_REQUEST_TOKEN=auth.get_signin_request_token()
)
@app.route('/category/new', methods=['GET', 'POST'])
@app.route('/category/<int:id>/edit', methods=['GET', 'POST'])
def get_edit_category_page(id=0):
if request.method == 'GET':
if not auth.is_user_admin():
# Only admins can add and edit catories
return render_template('unauthorized.html')
if id and id != 0:
# id is specified, render edit category page
category = db_utils.get_category(id)
return render_template(
'edit-category.html',
category=category,
CLIENT_ID=CLIENT_ID,
signed_in=auth.is_signed_in(),
picture=login_session.get('picture')
)
else:
return render_template(
'edit-category.html',
CLIENT_ID=CLIENT_ID,
signed_in=auth.is_signed_in(),
picture=login_session.get('picture')
)
elif request.method == 'POST':
# This is meant to be reached from AJAX request.
# We return a JSON response that will be used by
# The JS code making the request.
if not auth.is_user_admin():
return response.error('Unauthorized')
if request.form['name'] and request.form['desc']:
if id and id != 0:
# id is specified, update existing category
category = db_utils.update_category(
id, request.form['name'], request.form['desc'])
categoryData = {'id': category.id,
'name': category.name, 'desc': category.desc}
return response.success(url_for('get_index'), categoryData)
else:
category = db_utils.add_category(
request.form['name'], request.form['desc'])
categoryData = {'id': category.id,
'name': category.name, 'desc': category.desc}
return response.success(url_for('get_index'), categoryData)
@app.route('/delete/category/<int:id>', methods=['POST'])
def post_delete_category(id):
# This is meant to be reached from AJAX request.
# We return a JSON response that will be used by
# The JS code making the request.
if not auth.is_user_admin():
return response.error('Unauthorized')
db_utils.delete_category(id)
return response.success()
@app.route(
'/item/new',
methods=['GET', 'POST'],
endpoint='new_item')
@app.route(
'/item/<int:id>/edit',
methods=['GET', 'POST'],
endpoint='edit_item'
)
def get_edit_item_page(id=0):
if request.method == 'GET':
if not auth.is_signed_in():
# Redirect to login page.
# The url to which we are redirected will contain a paramenter
# which will be the url to redirect back to
# after logging in
redirect_parameter = None
if id and id != 0:
redirect_parameter = 'redirect={}'.format(
url_for('edit_item', id=id))
else:
redirect_parameter = 'redirect={}'.format(url_for('new_item'))
url = '{path}?{parameter}'.format(
path=url_for('get_login_page'),
parameter=redirect_parameter
)
return redirect(url, 302)
categories = db_utils.get_categories()
item = None
if id and id != 0:
item = db_utils.get_item(id)
if item is None:
return render_template('404.html')
else:
if (not auth.is_user_admin() and
item.user_id != auth.get_user_id()):
# Cannot edit item that does not belong to user
# But admins are allowed
return render_template('unauthorized.html')
return render_template(
'edit-item.html',
item=item,
categories=categories,
CLIENT_ID=CLIENT_ID,
signed_in=auth.is_signed_in(),
user_name=auth.get_user_name(),
picture=login_session.get('picture')
)
elif request.method == 'POST':
# This is meant to be reached from AJAX request.
# We return a JSON response that will be used by
# The JS code making the request.
if not auth.is_signed_in():
return response.error('Unauthorized')
if id and id != 0:
# Update item
item = db_utils.get_item(id)
if (not auth.is_user_admin() and
item.user_id != auth.get_user_id()):
# Only item owners and admins allowed to update item
return response.error('Unauthorized')
if (request.form['name'] and
request.form['desc'] and
request.form['cat-id']):
item = db_utils.update_item(
request.form['item-id'],
request.form['name'],
request.form['desc'],
request.form['cat-id']
)
itemData = {
'id': item.id,
'name': item.name,
'desc': item.desc,
'short_desc': item.short_desc,
'category_id': item.category_id
}
return response.success(
url_for(
'get_item_page',
id=itemData['id']
),
itemData
)
else:
return response.error('Failed to save')
else:
# Create new item
if (request.form['name'] and
request.form['desc'] and
request.form['cat-id']):
item = db_utils.add_item(
request.form['name'],
request.form['desc'],
request.form['cat-id'],
auth.get_user_id()
)
itemData = {
'id': item.id,
'name': item.name,
'desc': item.desc,
'short_desc': item.short_desc,
'category_id': item.category_id
}
return response.success(
url_for(
'get_item_page',
id=itemData['id']
),
itemData
)
else:
return response.error('Failed to save')
@app.route('/item/<int:id>', methods=['GET'])
def get_item_page(id):
categories = db_utils.get_categories()
item = db_utils.get_item(id)
recent_items = db_utils.get_recent_items(5)
if item is None:
return render_template('404.html')
item.nice_date = '{month} {day}, {year}'.format(
month=calendar.month_name[
item.created_at.month
],
day=item.created_at.day,
year=item.created_at.year
)
signed_in = auth.is_signed_in()
is_user_admin = False
is_item_owner = False
if signed_in:
is_user_admin = auth.is_user_admin()
is_item_owner = item.user_id == auth.get_user_id()
return render_template(
'item.html',
id=id,
categories=categories,
item=item,
recent_items=recent_items,
CLIENT_ID=CLIENT_ID,
signed_in=signed_in,
is_user_admin=is_user_admin,
is_item_owner=is_item_owner,
user_name=auth.get_user_name(),
picture=login_session.get('picture'),
SIGNIN_REQUEST_TOKEN=auth.get_signin_request_token()
)
@app.route('/myitems')
@app.route('/user/<string:user_id>')
def get_my_items_page(user_id=0):
if user_id == 0 and not auth.is_signed_in():
# This would be reached when /myitems is requested.
# Redirect to login page.
# The url to which we are redirected will contain a paramenter
# which will be the url to redirect back to
# after logging in
redirect_parameter = 'redirect={}'.format(url_for('get_my_items_page'))
url = '{path}?{parameter}'.format(
path=url_for('get_login_page'),
parameter=redirect_parameter
)
return redirect(url, 302)
page_title = 'My Items'
if user_id != 0:
user = db_utils.get_user(user_id)
page_title = 'Items by {}'.format(user.name)
categories = db_utils.get_categories()
items = db_utils.get_user_items(user_id if user_id else auth.get_user_id())
for item in items:
item.nice_date = '{month} {day}, {year}'.format(
month=calendar.month_name[item.created_at.month],
day=item.created_at.day,
year=item.created_at.year
)
signed_in = auth.is_signed_in()
is_user_admin = False
if signed_in:
is_user_admin = auth.is_user_admin()
return render_template(
'index.html',
page_title=page_title,
categories=categories,
items=items,
CLIENT_ID=CLIENT_ID,
signed_in=signed_in,
is_user_admin=is_user_admin,
user_name=auth.get_user_name(),
picture=auth.get_user_picture(),
SIGNIN_REQUEST_TOKEN=auth.get_signin_request_token()
)
@app.route('/delete/item/<int:id>', methods=['POST'])
def post_delete_item(id):
    """Delete an item.

    This is meant to be reached from AJAX request; we return a JSON
    response that will be used by the JS code making the request.

    Only the item's owner or an admin may delete it — previously this
    endpoint performed no authorization check at all, so any visitor
    could delete any item (the edit path already enforces owner/admin).
    """
    if not auth.is_signed_in():
        return response.error('You must be signed in to delete an item.')
    item = db_utils.get_item(id)
    # Guard against unknown ids before touching item attributes.
    if item is None:
        return response.error('Item not found.')
    if item.user_id != auth.get_user_id() and not auth.is_user_admin():
        return response.error('You are not allowed to delete this item.')
    db_utils.delete_item(item)
    return response.success()
@app.route('/login')
def get_login_page():
    """Render the standalone sign-in page."""
    # Collect the template context up front, then splat it into the call.
    context = {
        'CLIENT_ID': CLIENT_ID,
        'SIGNIN_REQUEST_TOKEN': auth.get_signin_request_token(),
    }
    return render_template('login.html', **context)
@app.route('/signin', methods=['POST'])
def do_sign_in():
    """Verify a posted Google ID token and establish a signed-in session.

    This is meant to be reached from AJAX request; we return a JSON
    response that will be used by the JS code making the request.  On
    success the verified profile data is cached in the Flask session
    for use by later requests.
    """
    # Reject requests that do not echo back the anti-forgery token we
    # issued with the page (CSRF protection for the sign-in endpoint).
    if (request.form['signin_request_token'] !=
            login_session['signin_request_token']):
        return response.error('Invalid token.')
    g_id_token = request.form['id_token']
    try:
        # Let the Google auth library validate the token's signature
        # and expiry against Google's public certificates.
        idinfo = id_token.verify_oauth2_token(
            g_id_token, requests.Request(), CLIENT_ID)
        if (idinfo['iss'] not in
                ['accounts.google.com', 'https://accounts.google.com']):
            raise ValueError('Wrong issuer.')
        if idinfo['aud'] != CLIENT_ID:
            raise ValueError('Invalid client id.')
    except ValueError:
        return response.error('Could not sign in')
    # 'sub' is Google's stable unique identifier for the account.
    user_id = idinfo['sub']
    stored_id_token = login_session.get('id_token')
    stored_user_id = login_session.get('user_id')
    user = db_utils.get_user(user_id)
    if user is None:
        # Add user to database if id does not exist.
        db_utils.add_user(user_id, idinfo['email'], idinfo['name'])
    if stored_id_token is not None and stored_user_id == user_id:
        # The same user is already signed in; nothing more to store.
        return response.success()
    # Store the access token in the session for later use.
    login_session['id_token'] = g_id_token
    login_session['user_id'] = user_id
    login_session['name'] = idinfo['name']
    login_session['email'] = idinfo['email']
    login_session['picture'] = idinfo['picture']
    return response.success()
@app.route('/signout', methods=['POST'])
def do_sign_out():
    """Sign the current user out by discarding the entire session.

    Reached via AJAX; returns a JSON success payload for the JS caller.
    """
    login_session.clear()
    return response.success()
# JSON endpoints
def _serialize_item(item):
    """Return a JSON-serializable dict for a single catalog item.

    Shared by both JSON endpoints below so the field list is defined
    in exactly one place (it was previously duplicated verbatim).
    """
    return {
        'id': item.id,
        'name': item.name,
        'created_at': item.created_at,
        'updated_at': item.updated_at,
        'category_id': item.category_id,
        'category_name': item.category.name,
        'user_id': item.user_id,
        'short_desc': item.short_desc,
        'desc': item.desc
    }


@app.route('/items.json')
def get_all_items_json():
    """JSON endpoint: list every item in the catalog."""
    items = db_utils.get_items(0)
    return jsonify([_serialize_item(item) for item in items]), 200


@app.route('/item.json')
def get_item_json():
    """JSON endpoint: a single item, selected by the `id` query arg."""
    id = request.args.get('id')
    if not id:
        return response.error('Item id not specified.')
    item = db_utils.get_item(id)
    # Guard against unknown ids: previously a missing item caused an
    # AttributeError (HTTP 500) when building the response dict.
    if item is None:
        return response.error('Item not found.')
    return jsonify(_serialize_item(item)), 200
if __name__ == '__main__':
    # NOTE(review): a hard-coded secret key and debug=True are unsafe for
    # production use.  Both can now be overridden via the environment;
    # the original values remain the defaults, so existing behavior is
    # unchanged when the variables are unset.
    import os
    app.secret_key = os.environ.get('FLASK_SECRET_KEY', '<KEY>')
    app.debug = os.environ.get('FLASK_DEBUG', '1') != '0'
    app.run(host='0.0.0.0', port=8000, threaded=False)
| en | 0.923022 | #!/system/bin/env python3 # The Google API client ID below is broken # into multiple lines for pep8 compliance # Only admins can add and edit catories # id is specified, render edit category page # This is meant to be reached from AJAX request. # We return a JSON response that will be used by # The JS code making the request. # id is specified, update existing category # This is meant to be reached from AJAX request. # We return a JSON response that will be used by # The JS code making the request. # Redirect to login page. # The url to which we are redirected will contain a paramenter # which will be the url to redirect back to # after logging in # Cannot edit item that does not belong to user # But admins are allowed # This is meant to be reached from AJAX request. # We return a JSON response that will be used by # The JS code making the request. # Update item # Only item owners and admins allowed to update item # Create new item # This would be reached when /myitems is requested. # Redirect to login page. # The url to which we are redirected will contain a paramenter # which will be the url to redirect back to # after logging in # This is meant to be reached from AJAX request. # We return a JSON response that will be used by # The JS code making the request. # This is meant to be reached from AJAX request. # We return a JSON response that will be used by # The JS code making the request. # Add user to database if id does not exist. # Store the access token in the session for later use. # JSON endpoints | 2.375687 | 2 |
ml.py | imsansan/WeatherAus | 0 | 6619993 | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.metrics import roc_curve as ROC
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings("ignore")
def dataWash(city, path: str):
weather = pd.read_csv(path)
X = weather.iloc[:,:-1]
Y = weather.loc[:,("Location","RainTomorrow")]
X = X.loc[X.loc[:,"Location"] == city]
Y = Y.loc[Y.loc[:,"Location"] == city]
Y =Y.drop(['Location'], axis=1)
X =X.drop(['Location'], axis=1)
#get month
X["Date"] = X["Date"].apply(lambda x:int(x.split("/")[1]))
X = X.rename(columns={"Date":"Month"})
#fill Null object-data up with most frequent value
cate = X.columns[X.dtypes == "object"].tolist()
si = SimpleImputer(missing_values=np.nan,strategy="most_frequent")
si.fit(X.loc[:,cate])
X.loc[:,cate] = si.transform(X.loc[:,cate])
#encode object data
oe = OrdinalEncoder()
oe = oe.fit(X.loc[:,cate])
X.loc[:,cate] = oe.transform(X.loc[:,cate])
oe = oe.fit(Y.loc[:,:])
Y.loc[:,:] = oe.transform(Y.loc[:,:])
#fill float data up with mean value.
col = X.columns[X.dtypes == "float64"].tolist()
impmean = SimpleImputer(missing_values=np.nan,strategy = "mean")
impmean = impmean.fit(X.loc[:,col])
X.loc[:,col] = impmean.transform(X.loc[:,col])
return X, Y
def Solution(city, Xt, Yt):
Xtrain, Xtest, Ytrain, Ytest = train_test_split(Xt,Yt,test_size=0.3)
Xreal, Yreal = dataWash(city, '%s.csv' % (city))
print(Xreal)
print(Yreal)
for i in [Xtrain,Xtest,Ytrain,Ytest]:
i.index = range(i.shape[0])
clf = LogisticRegression()
clf.fit(Xtrain, Ytrain.values.ravel())
result = clf.predict(Xtest)
score = clf.score(Xtest,Ytest.values.ravel())
recall = recall_score(Ytest.values.ravel(), result)
auc = roc_auc_score(Ytest.values.ravel(),clf.decision_function(Xtest))
#print("LR's testing accuracy %f, recall is %f, auc is %f" % (score,recall,auc))
#print(clf.predict(Xreal))
#print(clf.score(Xtrain, Ytrain.values.ravel()))
'''
#draw ROC curve
FPR, Recall, thresholds = ROC(Ytest,clf.decision_function(Xtest),pos_label=1)
area = roc_auc_score(Ytest,clf.decision_function(Xtest))
plt.figure()
plt.plot(FPR, Recall, color='red',
label='ROC curve (area = %0.2f)' % area)
plt.plot([0, 1], [0, 1], color='black', linestyle='--')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('Recall')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
'''
#report
#print(classification_report(Ytest.values.ravel(), result))
| import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.metrics import roc_curve as ROC
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings("ignore")
def dataWash(city, path: str):
weather = pd.read_csv(path)
X = weather.iloc[:,:-1]
Y = weather.loc[:,("Location","RainTomorrow")]
X = X.loc[X.loc[:,"Location"] == city]
Y = Y.loc[Y.loc[:,"Location"] == city]
Y =Y.drop(['Location'], axis=1)
X =X.drop(['Location'], axis=1)
#get month
X["Date"] = X["Date"].apply(lambda x:int(x.split("/")[1]))
X = X.rename(columns={"Date":"Month"})
#fill Null object-data up with most frequent value
cate = X.columns[X.dtypes == "object"].tolist()
si = SimpleImputer(missing_values=np.nan,strategy="most_frequent")
si.fit(X.loc[:,cate])
X.loc[:,cate] = si.transform(X.loc[:,cate])
#encode object data
oe = OrdinalEncoder()
oe = oe.fit(X.loc[:,cate])
X.loc[:,cate] = oe.transform(X.loc[:,cate])
oe = oe.fit(Y.loc[:,:])
Y.loc[:,:] = oe.transform(Y.loc[:,:])
#fill float data up with mean value.
col = X.columns[X.dtypes == "float64"].tolist()
impmean = SimpleImputer(missing_values=np.nan,strategy = "mean")
impmean = impmean.fit(X.loc[:,col])
X.loc[:,col] = impmean.transform(X.loc[:,col])
return X, Y
def Solution(city, Xt, Yt):
Xtrain, Xtest, Ytrain, Ytest = train_test_split(Xt,Yt,test_size=0.3)
Xreal, Yreal = dataWash(city, '%s.csv' % (city))
print(Xreal)
print(Yreal)
for i in [Xtrain,Xtest,Ytrain,Ytest]:
i.index = range(i.shape[0])
clf = LogisticRegression()
clf.fit(Xtrain, Ytrain.values.ravel())
result = clf.predict(Xtest)
score = clf.score(Xtest,Ytest.values.ravel())
recall = recall_score(Ytest.values.ravel(), result)
auc = roc_auc_score(Ytest.values.ravel(),clf.decision_function(Xtest))
#print("LR's testing accuracy %f, recall is %f, auc is %f" % (score,recall,auc))
#print(clf.predict(Xreal))
#print(clf.score(Xtrain, Ytrain.values.ravel()))
'''
#draw ROC curve
FPR, Recall, thresholds = ROC(Ytest,clf.decision_function(Xtest),pos_label=1)
area = roc_auc_score(Ytest,clf.decision_function(Xtest))
plt.figure()
plt.plot(FPR, Recall, color='red',
label='ROC curve (area = %0.2f)' % area)
plt.plot([0, 1], [0, 1], color='black', linestyle='--')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('Recall')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
'''
#report
#print(classification_report(Ytest.values.ravel(), result))
| en | 0.277787 | #get month #fill Null object-data up with most frequent value #encode object data #fill float data up with mean value. #print("LR's testing accuracy %f, recall is %f, auc is %f" % (score,recall,auc)) #print(clf.predict(Xreal)) #print(clf.score(Xtrain, Ytrain.values.ravel())) #draw ROC curve FPR, Recall, thresholds = ROC(Ytest,clf.decision_function(Xtest),pos_label=1) area = roc_auc_score(Ytest,clf.decision_function(Xtest)) plt.figure() plt.plot(FPR, Recall, color='red', label='ROC curve (area = %0.2f)' % area) plt.plot([0, 1], [0, 1], color='black', linestyle='--') plt.xlim([-0.05, 1.05]) plt.ylim([-0.05, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('Recall') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show() #report #print(classification_report(Ytest.values.ravel(), result)) | 2.870983 | 3 |
main.py | deadblue/fanfou-togame | 2 | 6619994 | <reponame>deadblue/fanfou-togame<gh_stars>1-10
# -*- coding: utf-8 -*-
__author__ = 'deadblue'
# init context at first
import webapp.context
import webapp
app = webapp.create_app()
__all__ = ['app']
| # -*- coding: utf-8 -*-
__author__ = 'deadblue'
# init context at first
import webapp.context
import webapp
app = webapp.create_app()
__all__ = ['app'] | en | 0.796956 | # -*- coding: utf-8 -*- # init context at first | 1.350604 | 1 |
agents/DDQN.py | HTRPOCODES/HTRPO-v2 | 7 | 6619995 | import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch import optim
import numpy as np
import basenets
from agents.DQN import DQN
from rlnets import FCDQN
import copy
from .config import DQN_CONFIG
class DDQN(DQN):
def __init__(self,hyperparams):
config = copy.deepcopy(DQN_CONFIG)
config.update(hyperparams)
DQN.__init__(self, config)
if type(self) == DDQN:
self.e_DQN = FCDQN(self.n_states, self.n_actions,
n_hiddens=config['hidden_layers'],
usebn=config['use_batch_norm'],
nonlinear=config['act_func'])
self.t_DQN = FCDQN(self.n_states, self.n_actions,
n_hiddens=config['hidden_layers'],
usebn=config['use_batch_norm'],
nonlinear=config['act_func'])
self.lossfunc = config['loss']()
if self.mom == 0 or self.mom is None:
self.optimizer = config['optimizer'](self.e_DQN.parameters(),lr = self.lr)
else:
self.optimizer = config['optimizer'](self.e_DQN.parameters(), lr=self.lr, momentum = self.mom)
def learn(self):
# check to replace target parameters
if self.learn_step_counter % self.replace_target_iter == 0:
self.hard_update(self.t_DQN, self.e_DQN)
batch_memory = self.sample_batch(self.batch_size)[0]
self.r = self.r.resize_(batch_memory['reward'].shape).copy_(torch.Tensor(batch_memory['reward']))
self.done = self.done.resize_(batch_memory['done'].shape).copy_(torch.Tensor(batch_memory['done']))
self.s_ = self.s_.resize_(batch_memory['next_state'].shape).copy_(torch.Tensor(batch_memory['next_state']))
self.a = self.a.resize_(batch_memory['action'].shape).copy_(torch.Tensor(batch_memory['action']))
self.s = self.s.resize_(batch_memory['state'].shape).copy_(torch.Tensor(batch_memory['state']))
q_target = self.t_DQN(self.s_)
q_eval_wrt_s_ = self.e_DQN(self.s_)
a_eval_wrt_s_ = torch.max(q_eval_wrt_s_,1)[1].view(self.batch_size, 1)
q_target = self.r + self.gamma * q_target.gather(1, a_eval_wrt_s_)
q_eval = self.e_DQN(self.s)
q_eval_wrt_a = q_eval.gather(1, self.a.long())
q_target = q_target.detach()
self.loss = self.lossfunc(q_eval_wrt_a, q_target)
self.optimizer.zero_grad()
self.loss.backward()
self.optimizer.step()
self.cost_his.append(self.loss.data)
# increasing epsilon
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
self.learn_step_counter += 1
| import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch import optim
import numpy as np
import basenets
from agents.DQN import DQN
from rlnets import FCDQN
import copy
from .config import DQN_CONFIG
class DDQN(DQN):
def __init__(self,hyperparams):
config = copy.deepcopy(DQN_CONFIG)
config.update(hyperparams)
DQN.__init__(self, config)
if type(self) == DDQN:
self.e_DQN = FCDQN(self.n_states, self.n_actions,
n_hiddens=config['hidden_layers'],
usebn=config['use_batch_norm'],
nonlinear=config['act_func'])
self.t_DQN = FCDQN(self.n_states, self.n_actions,
n_hiddens=config['hidden_layers'],
usebn=config['use_batch_norm'],
nonlinear=config['act_func'])
self.lossfunc = config['loss']()
if self.mom == 0 or self.mom is None:
self.optimizer = config['optimizer'](self.e_DQN.parameters(),lr = self.lr)
else:
self.optimizer = config['optimizer'](self.e_DQN.parameters(), lr=self.lr, momentum = self.mom)
def learn(self):
# check to replace target parameters
if self.learn_step_counter % self.replace_target_iter == 0:
self.hard_update(self.t_DQN, self.e_DQN)
batch_memory = self.sample_batch(self.batch_size)[0]
self.r = self.r.resize_(batch_memory['reward'].shape).copy_(torch.Tensor(batch_memory['reward']))
self.done = self.done.resize_(batch_memory['done'].shape).copy_(torch.Tensor(batch_memory['done']))
self.s_ = self.s_.resize_(batch_memory['next_state'].shape).copy_(torch.Tensor(batch_memory['next_state']))
self.a = self.a.resize_(batch_memory['action'].shape).copy_(torch.Tensor(batch_memory['action']))
self.s = self.s.resize_(batch_memory['state'].shape).copy_(torch.Tensor(batch_memory['state']))
q_target = self.t_DQN(self.s_)
q_eval_wrt_s_ = self.e_DQN(self.s_)
a_eval_wrt_s_ = torch.max(q_eval_wrt_s_,1)[1].view(self.batch_size, 1)
q_target = self.r + self.gamma * q_target.gather(1, a_eval_wrt_s_)
q_eval = self.e_DQN(self.s)
q_eval_wrt_a = q_eval.gather(1, self.a.long())
q_target = q_target.detach()
self.loss = self.lossfunc(q_eval_wrt_a, q_target)
self.optimizer.zero_grad()
self.loss.backward()
self.optimizer.step()
self.cost_his.append(self.loss.data)
# increasing epsilon
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
self.learn_step_counter += 1
| en | 0.172514 | # check to replace target parameters # increasing epsilon | 2.143865 | 2 |
djangosige/configs/settings.py | wfoschiera/djangoSIGE | 0 | 6619996 | <gh_stars>0
# import os
from decouple import config, Csv
import dj_database_url
from functools import partial
from pathlib import Path
from .configs import DEFAULT_FROM_EMAIL, EMAIL_HOST, EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, EMAIL_PORT, EMAIL_USE_TLS
BASE_DIR = Path(__file__).resolve().parent.parent
PROJECT_ROOT = BASE_DIR.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
default_db_url = 'sqlite:///' + str(BASE_DIR / 'db.sqlite3')
parse_database = partial(dj_database_url.parse, conn_max_age=600)
DATABASES = {
'default': config('DATABASE_URL', default=default_db_url, cast=parse_database)
}
SESSION_EXPIRE_AT_BROWSER_CLOSE = config('SESSION_EXPIRE_AT_BROWSER_CLOSE', cast=bool)
LOGIN_NOT_REQUIRED = (
r'^/login/$',
r'/login/esqueceu/',
r'/login/trocarsenha/',
r'/logout/',
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'collectfast',
'cloudinary',
'django.contrib.staticfiles',
'cloudinary_storage',
# djangosige apps:
'djangosige.apps.base',
'djangosige.apps.login',
'djangosige.apps.cadastro',
'djangosige.apps.vendas',
'djangosige.apps.compras',
'djangosige.apps.fiscal',
'djangosige.apps.financeiro',
'djangosige.apps.estoque',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# Middleware para paginas que exigem login
'djangosige.middleware.LoginRequiredMiddleware',
]
ROOT_URLCONF = 'djangosige.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates', ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# contexto para a versao do sige
'djangosige.apps.base.context_version.sige_version',
# contexto para a foto de perfil do usuario
'djangosige.apps.login.context_user.foto_usuario',
],
},
},
]
WSGI_APPLICATION = 'djangosige.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'pt-br'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Configurações do ambiente de desenvolvimento
STATIC_ROOT = BASE_DIR / 'staticfiles'
STATIC_URL = '/sige/static/'
MEDIA_URL = '/sige/media/'
MEDIA_ROOT = BASE_DIR / 'media'
STATICFILES_DIRS = (
BASE_DIR / 'static',
)
FIXTURE_DIRS = [
BASE_DIR / 'fixtures/',
]
CLOUDINARY_URL = config('CLOUDINARY_URL', default=False)
COLLECTFAST_ENABLED = False
# Storage configuration in
if CLOUDINARY_URL:
CLOUDINARY_STORAGE = { # pragma: no cover
'CLOUD_NAME': config('CLOUD_NAME'),
'API_KEY': config('API_KEY'),
'API_SECRET': config('API_SECRET')
}
# static assets
STATICFILES_STORAGE = 'cloudinary_storage.storage.StaticHashedCloudinaryStorage' # pragma: no cover
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/' # pragma: no cover
# Media assets
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage' # pragma: no cover
COLLECTFAST_ENABLED = False
# COLLECTFAST_DEBUG = True
COLLECTFAST_STRATEGY = 'collectfast.strategies.filesystem.FileSystemStrategy'
| # import os
from decouple import config, Csv
import dj_database_url
from functools import partial
from pathlib import Path
from .configs import DEFAULT_FROM_EMAIL, EMAIL_HOST, EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, EMAIL_PORT, EMAIL_USE_TLS
BASE_DIR = Path(__file__).resolve().parent.parent
PROJECT_ROOT = BASE_DIR.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
default_db_url = 'sqlite:///' + str(BASE_DIR / 'db.sqlite3')
parse_database = partial(dj_database_url.parse, conn_max_age=600)
DATABASES = {
'default': config('DATABASE_URL', default=default_db_url, cast=parse_database)
}
SESSION_EXPIRE_AT_BROWSER_CLOSE = config('SESSION_EXPIRE_AT_BROWSER_CLOSE', cast=bool)
LOGIN_NOT_REQUIRED = (
r'^/login/$',
r'/login/esqueceu/',
r'/login/trocarsenha/',
r'/logout/',
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'collectfast',
'cloudinary',
'django.contrib.staticfiles',
'cloudinary_storage',
# djangosige apps:
'djangosige.apps.base',
'djangosige.apps.login',
'djangosige.apps.cadastro',
'djangosige.apps.vendas',
'djangosige.apps.compras',
'djangosige.apps.fiscal',
'djangosige.apps.financeiro',
'djangosige.apps.estoque',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# Middleware para paginas que exigem login
'djangosige.middleware.LoginRequiredMiddleware',
]
ROOT_URLCONF = 'djangosige.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates', ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# contexto para a versao do sige
'djangosige.apps.base.context_version.sige_version',
# contexto para a foto de perfil do usuario
'djangosige.apps.login.context_user.foto_usuario',
],
},
},
]
WSGI_APPLICATION = 'djangosige.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'pt-br'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Configurações do ambiente de desenvolvimento
STATIC_ROOT = BASE_DIR / 'staticfiles'
STATIC_URL = '/sige/static/'
MEDIA_URL = '/sige/media/'
MEDIA_ROOT = BASE_DIR / 'media'
STATICFILES_DIRS = (
BASE_DIR / 'static',
)
FIXTURE_DIRS = [
BASE_DIR / 'fixtures/',
]
CLOUDINARY_URL = config('CLOUDINARY_URL', default=False)
COLLECTFAST_ENABLED = False
# Storage configuration in
if CLOUDINARY_URL:
CLOUDINARY_STORAGE = { # pragma: no cover
'CLOUD_NAME': config('CLOUD_NAME'),
'API_KEY': config('API_KEY'),
'API_SECRET': config('API_SECRET')
}
# static assets
STATICFILES_STORAGE = 'cloudinary_storage.storage.StaticHashedCloudinaryStorage' # pragma: no cover
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/' # pragma: no cover
# Media assets
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage' # pragma: no cover
COLLECTFAST_ENABLED = False
# COLLECTFAST_DEBUG = True
COLLECTFAST_STRATEGY = 'collectfast.strategies.filesystem.FileSystemStrategy' | en | 0.465929 | # import os # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition # djangosige apps: # Middleware para paginas que exigem login # contexto para a versao do sige # contexto para a foto de perfil do usuario # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ # LANGUAGE_CODE = 'en-us' # TIME_ZONE = 'UTC' # Configurações do ambiente de desenvolvimento # Storage configuration in # pragma: no cover # static assets # pragma: no cover # pragma: no cover # Media assets # pragma: no cover # COLLECTFAST_DEBUG = True | 1.9115 | 2 |
aes/synthesis/clean.py | torusresearch/circuits | 3 | 6619997 | <filename>aes/synthesis/clean.py
import sys
import re
if len(sys.argv) < 2:
sys.exit("Usage: python clean.py [netlist file name]")
filename = sys.argv[1]
file = open(filename, mode='r')
netlist = file.read()
file.close()
netlist = netlist.strip()
netlist_array = netlist.split(";")
new_netlist = ""
for netlist_elem in netlist_array:
new_netlist_elem = netlist_elem.strip().replace('\n', '').replace('\r', '')
new_netlist_elem = re.sub('\s+', ' ', new_netlist_elem)
if (new_netlist_elem.find(".Z()") == -1) and (new_netlist_elem != ""):
new_netlist += new_netlist_elem + ";\n"
file = open(filename, mode='w')
file.write(new_netlist)
file.close() | <filename>aes/synthesis/clean.py
import sys
import re
if len(sys.argv) < 2:
sys.exit("Usage: python clean.py [netlist file name]")
filename = sys.argv[1]
file = open(filename, mode='r')
netlist = file.read()
file.close()
netlist = netlist.strip()
netlist_array = netlist.split(";")
new_netlist = ""
for netlist_elem in netlist_array:
new_netlist_elem = netlist_elem.strip().replace('\n', '').replace('\r', '')
new_netlist_elem = re.sub('\s+', ' ', new_netlist_elem)
if (new_netlist_elem.find(".Z()") == -1) and (new_netlist_elem != ""):
new_netlist += new_netlist_elem + ";\n"
file = open(filename, mode='w')
file.write(new_netlist)
file.close() | none | 1 | 2.93169 | 3 | |
bunruija/classifiers/qrnn/model.py | tma15/bunruija | 4 | 6619998 | <reponame>tma15/bunruija<filename>bunruija/classifiers/qrnn/model.py
import numpy as np
import torch
from bunruija.classifiers.classifier import NeuralBaseClassifier
class QRNNLayer(torch.nn.Module):
def __init__(self, input_size, output_size, window_size=2, bidirectional=True):
super().__init__()
self.num_gates = 3
self.window_size = window_size
self.input_size = input_size
self.output_size = output_size
self.bidirectional = bidirectional
if self.bidirectional:
self.fc = torch.nn.Linear(
self.window_size * input_size,
2 * output_size * self.num_gates)
else:
self.fc = torch.nn.Linear(
self.window_size * input_size,
output_size * self.num_gates)
def forward(self, x):
bsz = x.size(0)
seq_len = x.size(1)
window_tokens = [x]
for i in range(self.window_size - 1):
prev_x = x[:, :-(i + 1), :]
prev_x = torch.cat(
[prev_x.new_zeros(bsz, i + 1, self.input_size), prev_x],
dim=1)
window_tokens.insert(0, prev_x)
x = torch.stack(window_tokens, dim=2)
x = x.view(bsz, seq_len, -1)
x = self.fc(x)
z, f, o = x.chunk(self.num_gates, dim=2)
z = torch.tanh(z)
f = torch.sigmoid(f)
seq_len = z.size(1)
c = torch.zeros_like(z)
if self.bidirectional:
c = c.view(bsz, seq_len, 2, self.output_size)
f = f.view(bsz, seq_len, 2, self.output_size)
z = z.view(bsz, seq_len, 2, self.output_size)
for t in range(seq_len):
if t == 0:
c[:, t, 0] = (1 - f[:, t, 0]) * z[:, t, 0]
else:
c[:, t, 0] = f[:, t, 0] * c[:, t - 1, 0].clone() + (1 - f[:, t, 0]) * z[:, t, 0]
for t in range(seq_len - 1, -1, -1):
if t == seq_len - 1:
c[:, t, 0] = (1 - f[:, t, 0]) * z[:, t, 0]
else:
c[:, t, 0] = f[:, t, 0] * c[:, t + 1, 0].clone() + (1 - f[:, t, 0]) * z[:, t, 0]
c = c.view(bsz, seq_len, 2 * self.output_size)
else:
for t in range(seq_len):
if t == 0:
c[:, t] = (1 - f[:, t]) * z[:, t]
else:
c[:, t] = f[:, t] * c[:, t -1].clone() + (1 - f[:, t]) * z[:, t]
h = torch.sigmoid(o) * c
return h
class QRNN(NeuralBaseClassifier):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dim_emb = kwargs.get('dim_emb', 256)
self.dim_hid = kwargs.get('dim_hid', 128)
self.window_size = kwargs.get('window_size', 3)
self.layers = torch.nn.ModuleList()
self.bidirectional = kwargs.get('bidirectional', True)
num_layers = kwargs.get('num_layers', 2)
for i in range(num_layers):
if i == 0:
input_size = self.dim_emb
else:
input_size = 2 * self.dim_hid if self.bidirectional else self.dim_hid
self.layers.append(QRNNLayer(
input_size,
self.dim_hid,
window_size=self.window_size,
bidirectional=self.bidirectional))
def init_layer(self, data):
self.pad = 0
max_input_idx = 0
for data_i in data:
max_input_idx = max(max_input_idx, np.max(data_i['inputs']))
self.embed = torch.nn.Embedding(
max_input_idx + 1,
self.dim_emb,
padding_idx=0,
)
self.out = torch.nn.Linear(
2 * self.dim_hid if self.bidirectional else self.dim_hid,
len(self.labels),
bias=True)
def __call__(self, batch):
src_tokens = batch['inputs']
lengths = (src_tokens != self.pad).sum(dim=1)
x = self.embed(src_tokens)
for layer in self.layers:
x = layer(x)
x = torch.nn.functional.adaptive_max_pool2d(
x,
(1, 2 * self.dim_hid if self.bidirectional else self.dim_hid))
x = x.squeeze(1)
x = self.out(x)
return x
| import numpy as np
import torch
from bunruija.classifiers.classifier import NeuralBaseClassifier
class QRNNLayer(torch.nn.Module):
def __init__(self, input_size, output_size, window_size=2, bidirectional=True):
super().__init__()
self.num_gates = 3
self.window_size = window_size
self.input_size = input_size
self.output_size = output_size
self.bidirectional = bidirectional
if self.bidirectional:
self.fc = torch.nn.Linear(
self.window_size * input_size,
2 * output_size * self.num_gates)
else:
self.fc = torch.nn.Linear(
self.window_size * input_size,
output_size * self.num_gates)
def forward(self, x):
bsz = x.size(0)
seq_len = x.size(1)
window_tokens = [x]
for i in range(self.window_size - 1):
prev_x = x[:, :-(i + 1), :]
prev_x = torch.cat(
[prev_x.new_zeros(bsz, i + 1, self.input_size), prev_x],
dim=1)
window_tokens.insert(0, prev_x)
x = torch.stack(window_tokens, dim=2)
x = x.view(bsz, seq_len, -1)
x = self.fc(x)
z, f, o = x.chunk(self.num_gates, dim=2)
z = torch.tanh(z)
f = torch.sigmoid(f)
seq_len = z.size(1)
c = torch.zeros_like(z)
if self.bidirectional:
c = c.view(bsz, seq_len, 2, self.output_size)
f = f.view(bsz, seq_len, 2, self.output_size)
z = z.view(bsz, seq_len, 2, self.output_size)
for t in range(seq_len):
if t == 0:
c[:, t, 0] = (1 - f[:, t, 0]) * z[:, t, 0]
else:
c[:, t, 0] = f[:, t, 0] * c[:, t - 1, 0].clone() + (1 - f[:, t, 0]) * z[:, t, 0]
for t in range(seq_len - 1, -1, -1):
if t == seq_len - 1:
c[:, t, 0] = (1 - f[:, t, 0]) * z[:, t, 0]
else:
c[:, t, 0] = f[:, t, 0] * c[:, t + 1, 0].clone() + (1 - f[:, t, 0]) * z[:, t, 0]
c = c.view(bsz, seq_len, 2 * self.output_size)
else:
for t in range(seq_len):
if t == 0:
c[:, t] = (1 - f[:, t]) * z[:, t]
else:
c[:, t] = f[:, t] * c[:, t -1].clone() + (1 - f[:, t]) * z[:, t]
h = torch.sigmoid(o) * c
return h
class QRNN(NeuralBaseClassifier):
    """Quasi-recurrent neural network text classifier.

    Stacks QRNNLayer modules on top of a token embedding, max-pools the
    last layer's hidden states over time, and applies a linear
    classification head.  The embedding and output layers are built
    lazily in init_layer() once the vocabulary size is known.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.dim_emb = kwargs.get('dim_emb', 256)
        self.dim_hid = kwargs.get('dim_hid', 128)
        self.window_size = kwargs.get('window_size', 3)
        self.layers = torch.nn.ModuleList()
        self.bidirectional = kwargs.get('bidirectional', True)
        num_layers = kwargs.get('num_layers', 2)
        for i in range(num_layers):
            # The first layer consumes embeddings; deeper layers consume
            # the previous layer's (possibly bidirectional) hidden states.
            if i == 0:
                input_size = self.dim_emb
            else:
                input_size = 2 * self.dim_hid if self.bidirectional else self.dim_hid
            self.layers.append(QRNNLayer(
                input_size,
                self.dim_hid,
                window_size=self.window_size,
                bidirectional=self.bidirectional))

    def init_layer(self, data):
        """Build the embedding and output head from the training data.

        The vocabulary size is inferred from the largest input index seen
        in *data*; index 0 is reserved for padding.
        """
        self.pad = 0
        max_input_idx = 0
        for data_i in data:
            max_input_idx = max(max_input_idx, np.max(data_i['inputs']))

        self.embed = torch.nn.Embedding(
            max_input_idx + 1,
            self.dim_emb,
            padding_idx=0,
        )

        # self.labels is provided by NeuralBaseClassifier.
        self.out = torch.nn.Linear(
            2 * self.dim_hid if self.bidirectional else self.dim_hid,
            len(self.labels),
            bias=True)

    def __call__(self, batch):
        """Return classification logits of shape (batch, num_labels)."""
        src_tokens = batch['inputs']
        # Note: the original also computed per-sample lengths here but
        # never used them; that dead computation has been removed.
        x = self.embed(src_tokens)
        for layer in self.layers:
            x = layer(x)
        # Max-pool over the time dimension: (bsz, seq, hid) -> (bsz, 1, hid).
        x = torch.nn.functional.adaptive_max_pool2d(
            x,
            (1, 2 * self.dim_hid if self.bidirectional else self.dim_hid))
        x = x.squeeze(1)
        x = self.out(x)
        return x
Code-Code/CodeCompletion-token/code/model.py | ywen666/CodeXGLUE | 613 | 6619999 | <gh_stars>100-1000
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
    """Container module with an encoder, an LSTM, and a decoder.

    Given token indices of shape (batch, seq), embeds them, runs a
    batch-first LSTM, and projects hidden states to vocabulary logits.
    When *labels* are supplied, forward() also returns the shifted
    next-token cross-entropy loss.
    """

    def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
        super(RNNModel, self).__init__()
        self.ntoken = ntoken
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout, batch_first=True)
        self.decoder = nn.Linear(nhid, ntoken)
        self.criterion = nn.CrossEntropyLoss()

        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            if nhid != ninp:
                raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight

        self.init_weights()
        self.nhid = nhid
        self.nlayers = nlayers

    def init_weights(self):
        """Uniformly initialize embedding/decoder weights; zero the decoder bias."""
        initrange = 0.1
        nn.init.uniform_(self.encoder.weight, -initrange, initrange)
        # Bug fix: the original zeroed (and then immediately re-randomized)
        # self.decoder.weight; the intent is to zero the decoder *bias*.
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, input, hidden=None, labels=None):
        emb = self.encoder(input)
        if hidden is not None:
            output, hidden = self.rnn(emb, hidden)
        else:
            output, hidden = self.rnn(emb)
        output = self.drop(output)
        output = self.decoder(output)
        if labels is not None:
            # Shift so that the logits at position t predict token t + 1.
            shift_logits = output[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = self.criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            return loss, output, hidden
        else:
            return output, hidden

    def init_hidden(self, bsz):
        """Return zero-initialized (h_0, c_0) for the LSTM.

        Bug fix: the original branched on self.rnn_type, which is never
        set anywhere in this class, so the method always raised
        AttributeError.  self.rnn is always an LSTM here, so the LSTM
        tuple is returned unconditionally.
        """
        weight = next(self.parameters())
        return (weight.new_zeros(self.nlayers, bsz, self.nhid),
                weight.new_zeros(self.nlayers, bsz, self.nhid))
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.ntoken = ntoken
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout, batch_first=True)
self.decoder = nn.Linear(nhid, ntoken)
self.criterion = nn.CrossEntropyLoss()
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
nn.init.zeros_(self.decoder.weight)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, input, hidden=None, labels=None):
emb = self.encoder(input)
if hidden is not None:
output, hidden = self.rnn(emb, hidden)
else:
output, hidden = self.rnn(emb)
output = self.drop(output)
output = self.decoder(output)
# decoded = decoded.view(-1, self.ntoken)
# output = F.log_softmax(decoded, dim=1)
if labels is not None:
shift_logits = output[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss = self.criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
return loss, output, hidden
else:
return output, hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid) | en | 0.668085 | # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. Container module with an encoder, a recurrent module, and a decoder. # Optionally tie weights as in: # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016) # https://arxiv.org/abs/1608.05859 # and # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016) # https://arxiv.org/abs/1611.01462 # decoded = decoded.view(-1, self.ntoken) # output = F.log_softmax(decoded, dim=1) | 2.946257 | 3 |
code/example code/introducing-python-master/1st_edition/intro/youtube.py | ChouHsuan-Cheng/Learning_Python_Base | 0 | 6620000 | <filename>code/example code/introducing-python-master/1st_edition/intro/youtube.py
import json
from urllib.request import urlopen

# Fetch YouTube's "top rated" standard feed (legacy GData API) and print
# the titles of the first six entries.
feed_url = "https://gdata.youtube.com/feeds/api/standardfeeds/top_rated?alt=json"
feed = json.loads(urlopen(feed_url).read().decode('utf8'))

for entry in feed['feed']['entry'][0:6]:
    print(entry['title']['$t'])
| <filename>code/example code/introducing-python-master/1st_edition/intro/youtube.py
import json
from urllib.request import urlopen
url = "https://gdata.youtube.com/feeds/api/standardfeeds/top_rated?alt=json"
response = urlopen(url)
contents = response.read()
text = contents.decode('utf8')
data = json.loads(text)
for video in data['feed']['entry'][0:6]:
print(video['title']['$t'])
| none | 1 | 3.369071 | 3 | |
tests/test_authentication.py | jamiereid/mhvdb2 | 2 | 6620001 | <reponame>jamiereid/mhvdb2<filename>tests/test_authentication.py
from mhvdb2 import app
from mhvdb2.admin import authentication
from mhvdb2.admin.authentication import User
import unittest
class AuthenticationTests(unittest.TestCase):
    """Tests for user registration and authentication in the admin app.

    NOTE(review): the credential literals below are anonymization
    placeholders from the dataset dump, not real fixture values.
    """

    def setUp(self):
        # Fresh Flask test client and fixture credentials; delete any
        # leftover user row so each test starts from a clean slate.
        self.app = app.test_client()
        self.name = "<NAME>"
        self.email = "<EMAIL>"
        self.password = "<PASSWORD>"
        User.delete().where(User.email == self.email).execute()

    def test_register(self):
        # Successful registration returns no errors and persists the user.
        errors = authentication.register_user(self.name, self.email, self.password)
        user = User.get(User.email == self.email)
        self.assertEqual(len(errors), 0)
        self.assertEqual(user.name, self.name)
        self.assertEqual(user.email, self.email)

    def test_register_no_name(self):
        errors = authentication.register_user("", self.email, self.password)
        self.assertEqual(len(errors), 1)

    def test_register_no_email(self):
        errors = authentication.register_user(self.name, "", self.password)
        self.assertEqual(len(errors), 1)

    def test_register_bad_password(self):
        # Both an empty password and a too-short one ("12345") are rejected.
        errors = authentication.register_user(self.name, self.email, "")
        self.assertEqual(len(errors), 1)
        errors = authentication.register_user(self.name, self.email, "12345")
        self.assertEqual(len(errors), 1)

    def test_authenticate_user(self):
        with app.test_request_context():
            authentication.register_user(self.name, self.email, self.password)
            self.assertTrue(authentication.authenticate_user(self.email, self.password))
            self.assertFalse(authentication.authenticate_user(self.email, "<PASSWORD>"))
            self.assertFalse(authentication.authenticate_user("<EMAIL>", self.password))
            # Bug fix: peewee queries are lazy -- without .execute() this
            # cleanup delete was built but never actually run.
            User.delete().where(User.email == self.email).execute()
if __name__ == '__main__':
unittest.main()
| from mhvdb2 import app
from mhvdb2.admin import authentication
from mhvdb2.admin.authentication import User
import unittest
class AuthenticationTests(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.name = "<NAME>"
self.email = "<EMAIL>"
self.password = "<PASSWORD>"
User.delete().where(User.email == self.email).execute()
def test_register(self):
errors = authentication.register_user(self.name, self.email, self.password)
user = User.get(User.email == self.email)
self.assertEqual(len(errors), 0)
self.assertEqual(user.name, self.name)
self.assertEqual(user.email, self.email)
def test_register_no_name(self):
errors = authentication.register_user("", self.email, self.password)
self.assertEqual(len(errors), 1)
def test_register_no_email(self):
errors = authentication.register_user(self.name, "", self.password)
self.assertEqual(len(errors), 1)
def test_register_bad_password(self):
errors = authentication.register_user(self.name, self.email, "")
self.assertEqual(len(errors), 1)
errors = authentication.register_user(self.name, self.email, "12345")
self.assertEqual(len(errors), 1)
def test_authenticate_user(self):
with app.test_request_context():
authentication.register_user(self.name, self.email, self.password)
self.assertTrue(authentication.authenticate_user(self.email, self.password))
self.assertFalse(authentication.authenticate_user(self.email, "<PASSWORD>"))
self.assertFalse(authentication.authenticate_user("<EMAIL>", self.password))
User.delete().where(User.email == self.email)
if __name__ == '__main__':
unittest.main() | none | 1 | 2.9673 | 3 | |
scripts/fix_selected_text.py | marco262/my_wiki | 1 | 6620002 | import re
import sys
import clipboard

# Clipboard text munger: read the clipboard, apply the transformation
# named by argv[1], print the result, and write it back to the clipboard.
# ("re" is imported at the top of the original file.)

if len(sys.argv) == 1:
    raise Exception("Pass in a function to run, you goddamn moron")

text = clipboard.paste()

if sys.argv[1] == "create_wiki_spell_table":
    # Turn "name<whitespace>spell, spell, ..." lines into wiki table rows
    # whose second column links each spell as _[[[spell:...]]]_.
    text = text.lower().replace("\r", "")
    text = re.sub(r"^", "|| ", text, flags=re.MULTILINE)
    text = re.sub(r" \s+", " || _[[[spell:", text)
    text = re.sub(r", ", "]]]_, _[[[spell:", text)
    text = re.sub(r"$", "]]]_ ||", text, flags=re.MULTILINE)
elif sys.argv[1] == "create_wiki_table":
    # Convert whitespace/tab separated columns into a wiki table; the
    # first row is turned into a header row (||~ cells).
    text = text.replace("\r", "")
    text = re.sub(r"^", "|| ", text, flags=re.MULTILINE)
    text = re.sub(r"( \s+|\t+)", " || ", text)
    text = re.sub(r"$", " ||@", text, flags=re.MULTILINE)
    first_line = text.split("\n")[0]
    first_line_repl = first_line.replace("|| ", "||~ ")
    text = text.replace(first_line, first_line_repl)
elif sys.argv[1] == "fix_line_breaks":
    # Normalize curly quotes/dashes and PDF ligature glyphs, then unwrap
    # hard line breaks; '@' acts as a temporary paragraph sentinel.
    text = re.sub(r"’", "'", text)
    text = re.sub(r"“", '"', text)
    text = re.sub(r"”", '"', text)
    text = re.sub(r"[ϐϔ]", "f", text)
    text = re.sub(r"●", "*", text)
    text = re.sub(r"¦", "fi", text)
    text = re.sub(r"§", "fl", text)
    text = re.sub(r"¨", "ffi", text)
    # NOTE(review): this maps to "ffi" exactly like the line above; the
    # "ffl" ligature may have been intended -- confirm before changing.
    text = re.sub(r"©", "ffi", text)
    text = re.sub(r" ?— ?", " -- ", text)
    text = re.sub(r"\r?\n", " ", text)
    text = re.sub(r"\s+", " ", text)
    text = re.sub(r"@ ", "@", text)
    text = re.sub(r"@", "\n", text)
elif sys.argv[1] == "add_spell":
    # Wrap each item of a comma-separated spell list in an italic wiki link.
    text = text.lower()
    text = re.sub(r"^", "_[[[spell:", text)
    text = re.sub(r"$", "]]]_", text)
    text = re.sub(r", ?", "]]]_, _[[[spell:", text)
elif sys.argv[1] == "add_special_formatting":
    # Title-case the text before the first comma on each line, including
    # letters following a space or an opening parenthesis.
    for m in re.finditer(r'^(.*?),', text, re.MULTILINE):
        title = m.group(1).capitalize()
        for n in re.finditer(r" ([a-z])", title):
            title = title.replace(n.group(0), f" {n.group(1).upper()}")
        for n in re.finditer(r"\(([a-z])", title):
            title = title.replace(n.group(0), f"({n.group(1).upper()}")
        text = text.replace(m.group(0), f'{title},')
else:
    raise Exception("Unknown function: {}".format(sys.argv[1]))

print(text)
clipboard.copy(text)
clipboard.copy(text)
| import re
import sys
import clipboard
if len(sys.argv) == 1:
raise Exception("Pass in a function to run, you goddamn moron")
text = clipboard.paste()
if sys.argv[1] == "create_wiki_spell_table":
text = text.lower().replace("\r", "")
text = re.sub(r"^", "|| ", text, flags=re.MULTILINE)
text = re.sub(r" \s+", " || _[[[spell:", text)
text = re.sub(r", ", "]]]_, _[[[spell:", text)
text = re.sub(r"$", "]]]_ ||", text, flags=re.MULTILINE)
elif sys.argv[1] == "create_wiki_table":
text = text.replace("\r", "")
text = re.sub(r"^", "|| ", text, flags=re.MULTILINE)
text = re.sub(r"( \s+|\t+)", " || ", text)
text = re.sub(r"$", " ||@", text, flags=re.MULTILINE)
first_line = text.split("\n")[0]
first_line_repl = first_line.replace("|| ", "||~ ")
text = text.replace(first_line, first_line_repl)
elif sys.argv[1] == "fix_line_breaks":
text = re.sub(r"’", "'", text)
text = re.sub(r"“", '"', text)
text = re.sub(r"”", '"', text)
text = re.sub(r"[ϐϔ]", "f", text)
text = re.sub(r"●", "*", text)
text = re.sub(r"¦", "fi", text)
text = re.sub(r"§", "fl", text)
text = re.sub(r"¨", "ffi", text)
text = re.sub(r"©", "ffi", text)
text = re.sub(r" ?— ?", " -- ", text)
text = re.sub(r"\r?\n", " ", text)
text = re.sub(r"\s+", " ", text)
text = re.sub(r"@ ", "@", text)
text = re.sub(r"@", "\n", text)
elif sys.argv[1] == "add_spell":
text = text.lower()
text = re.sub(r"^", "_[[[spell:", text)
text = re.sub(r"$", "]]]_", text)
text = re.sub(r", ?", "]]]_, _[[[spell:", text)
elif sys.argv[1] == "add_special_formatting":
for m in re.finditer(r'^(.*?),', text, re.MULTILINE):
title = m.group(1).capitalize()
for n in re.finditer(r" ([a-z])", title):
title = title.replace(n.group(0), f" {n.group(1).upper()}")
for n in re.finditer(r"\(([a-z])", title):
title = title.replace(n.group(0), f"({n.group(1).upper()}")
text = text.replace(m.group(0), f'{title},')
else:
raise Exception("Unknown function: {}".format(sys.argv[1]))
print(text)
clipboard.copy(text)
| none | 1 | 2.939 | 3 | |
lab3.py | nathanslack/csc121 | 0 | 6620003 | <gh_stars>0
import arcade

# "A NEW HOPE" -- a desert scene drawn with the arcade library.
arcade.open_window(600, 600, "A N E W H O P E")
arcade.set_background_color(arcade.color.LIGHT_PINK)
arcade.start_render()

# Sky gradient, painted as stacked horizontal bands.
arcade.draw_rectangle_filled(300, 410, 600, 25, arcade.color.LAVENDER_PINK)
arcade.draw_rectangle_filled(300, 390, 600, 25, arcade.color.LIGHT_ORCHID)
arcade.draw_rectangle_filled(300, 375, 600, 25, arcade.color.LILAC)
arcade.draw_rectangle_filled(300, 340, 600, 50, arcade.color.LIGHT_MEDIUM_ORCHID)

# Desert floor with two overlapping dunes.
arcade.draw_rectangle_filled(300, 100, 600, 400, arcade.color.SAND)
arcade.draw_ellipse_filled(100, 300, 600, 100, arcade.color.SAND)
arcade.draw_ellipse_filled(375, 300, 600, 100, arcade.color.SAND)

# Binary sunset: a white sun with a red-orange rim, plus a second
# red-orange sun lower in the sky.
arcade.draw_circle_filled(401, 474, 25, arcade.color.RED_ORANGE)
arcade.draw_circle_filled(400, 475, 25, arcade.color.WHITE_SMOKE)
arcade.draw_circle_filled(500, 400, 25, arcade.color.RED_ORANGE)

# Homestead hut: outlined dome over a sand mound.
arcade.draw_rectangle_outline(260, 250, 125, 125, arcade.color.BLACK_BEAN)
arcade.draw_circle_filled(100, 250, 175, arcade.color.SAND)
arcade.draw_circle_outline(100, 250, 175, arcade.color.BLACK_BEAN)
arcade.draw_circle_outline(115, 260, 75, arcade.color.BLACK_BEAN)
arcade.draw_rectangle_filled(75, 150, 500, 225, arcade.color.SAND)
arcade.draw_rectangle_outline(75, 200, 525, 125, arcade.color.BLACK_BEAN)

# Distant structure silhouette.
arcade.draw_rectangle_filled(550, 350, 10, 50, arcade.color.ONYX)
arcade.draw_rectangle_filled(545, 338, 5, 25, arcade.color.ONYX)
arcade.draw_rectangle_filled(556, 338, 5, 25, arcade.color.ONYX)
arcade.draw_rectangle_filled(550, 375, 2, 30, arcade.color.ONYX)

# Title text.
arcade.draw_text("A", 75, 550, arcade.color.JET)
arcade.draw_text("N E W", 62, 525, arcade.color.JET)
arcade.draw_text("<NAME>", 58, 500, arcade.color.JET)

arcade.finish_render()
arcade.run() | import arcade
arcade.open_window(600, 600, "A N E W H O P E")
arcade.set_background_color(arcade.color.LIGHT_PINK)
arcade.start_render()
# START//
# GRADIENT IN THE SKY //
arcade.draw_rectangle_filled(
300, 410, 600, 25, arcade.color.LAVENDER_PINK
)
arcade.draw_rectangle_filled(
300, 390, 600, 25, arcade.color.LIGHT_ORCHID
)
arcade.draw_rectangle_filled(
300, 375, 600, 25, arcade.color.LILAC
)
arcade.draw_rectangle_filled(
300, 340, 600, 50, arcade.color.LIGHT_MEDIUM_ORCHID
)
# DRAWING THE GROUND //
arcade.draw_rectangle_filled(
300, 100, 600, 400, arcade.color.SAND
)
arcade.draw_ellipse_filled(
100, 300, 600, 100, arcade.color.SAND
)
arcade.draw_ellipse_filled(
375, 300, 600, 100, arcade.color.SAND
)
# DRAWING THE BINARY SUNSET //
arcade.draw_circle_filled(
401, 474, 25, arcade.color.RED_ORANGE
)
arcade.draw_circle_filled(
400, 475, 25, arcade.color.WHITE_SMOKE
)
arcade.draw_circle_filled(
500, 400, 25, arcade.color.RED_ORANGE
)
# DRAWING THE HUT //
arcade.draw_rectangle_outline(
260, 250, 125, 125, arcade.color.BLACK_BEAN
)
arcade.draw_circle_filled(
100, 250, 175, arcade.color.SAND
)
arcade.draw_circle_outline(
100, 250, 175, arcade.color.BLACK_BEAN
)
arcade.draw_circle_outline(
115, 260, 75, arcade.color.BLACK_BEAN
)
arcade.draw_rectangle_filled(
75, 150, 500, 225, arcade.color.SAND
)
arcade.draw_rectangle_outline(
75, 200, 525, 125, arcade.color.BLACK_BEAN
)
# THING IN DISTANCE //
arcade.draw_rectangle_filled(
550, 350, 10, 50, arcade.color.ONYX
)
arcade.draw_rectangle_filled(
545, 338, 5, 25, arcade.color.ONYX
)
arcade.draw_rectangle_filled(
556, 338, 5, 25, arcade.color.ONYX
)
arcade.draw_rectangle_filled(
550, 375, 2, 30, arcade.color.ONYX
)
# TEXT //
arcade.draw_text(
"A", 75, 550, arcade.color.JET
)
arcade.draw_text(
"N E W", 62, 525, arcade.color.JET
)
arcade.draw_text(
"<NAME>", 58, 500, arcade.color.JET
)
arcade.finish_render()
arcade.run() | ja | 0.399609 | # START// # GRADIENT IN THE SKY // # DRAWING THE GROUND // # DRAWING THE BINARY SUNSET // # DRAWING THE HUT // # THING IN DISTANCE // # TEXT // | 2.33449 | 2 |
multiprocessing_daemon.py | fwang29/Parallel_Programming | 0 | 6620004 | import multiprocessing
import time
import sys

# Python 2 demonstration of daemon vs. non-daemon multiprocessing workers
# (note the "print" statement syntax -- this module is Python 2 only;
# 'multiprocessing' is imported at the top of the original file).


def daemon():
    # Daemonized worker: announces itself, then sleeps past the parent's
    # exit.  Because daemon processes are terminated when the main program
    # ends, the "Exiting" line is normally never printed.
    p = multiprocessing.current_process()
    print 'starting:', p.name, p.pid
    sys.stdout.flush()
    time.sleep(2)
    print 'Exiting :', p.name, p.pid
    sys.stdout.flush()


def non_daemon():
    # Regular worker: prints start and exit immediately; the interpreter
    # waits for this process before shutting down.
    p = multiprocessing.current_process()
    print 'starting:', p.name, p.pid
    sys.stdout.flush()
    print 'Exiting :', p.name, p.pid
    sys.stdout.flush()


if __name__ == '__main__':
    # Start the daemon, give it a one-second head start, then start the
    # non-daemon worker so their lifecycles can be compared.
    d = multiprocessing.Process(name='daemon', target=daemon)
    d.daemon = True
    n = multiprocessing.Process(name='non_daemon', target=non_daemon)
    n.daemon = False
    d.start()
    time.sleep(1)
    n.start()
| import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print 'starting:', p.name, p.pid
sys.stdout.flush()
time.sleep(2)
print 'Exiting :', p.name, p.pid
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print 'starting:', p.name, p.pid
sys.stdout.flush()
print 'Exiting :', p.name, p.pid
sys.stdout.flush()
if __name__ == '__main__':
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non_daemon', target=non_daemon)
n.daemon = False
d.start()
time.sleep(1)
n.start()
| none | 1 | 2.820114 | 3 | |
EX059.py | gdsouza08/desafios---Python | 0 | 6620005 | from time import sleep
# Interactive calculator menu (Curso em Video exercise 59): loops until
# the user picks option 5.
n1 = int(input('Primeiro valor: '))
n2 = int(input('Segundo valor: '))
opcao = 0
print('-=-' * 10)
print('''O que deseja fazer:
[1] Somar
[2] Multiplicar
[3] Maior Valor
[4] Novos Números
[5] Sair do Programa''')
print('-=-' * 10)
while opcao != 5:
    opcao = int(input('>>>>>> Qual é a sua opção? '))
    if opcao == 1:
        soma = n1 + n2
        print('A soma entre os números resulta em {}'.format(soma))
    elif opcao == 2:
        multiplicar = n1 * n2
        print('{} x {} é {}'.format(n1, n2, multiplicar))
    elif opcao == 3:
        print('O maior dos números é {}'.format(max(n1, n2)))
    elif opcao == 4:
        # Bug fix: the original read the new values into throwaway
        # variables (novo/novo1) and never used them, so option 4 had no
        # effect.  The new values now replace n1 and n2.
        n1 = int(input('Digite um novo valor: '))
        n2 = int(input('Digite um novo valor: '))
    elif opcao == 5:
        print('Fechando programa!')
        sleep(3)
        exit('Obrigado por utilizar o sistema Curso em Vídeo!!!')
    else:
        print('Opção inválida! Selecione uma das opções acima.')
| from time import sleep
n1 = int(input('Primeiro valor: '))
n2 = int(input('Segundo valor: '))
opcao = 0
print('-=-' * 10)
print('''O que deseja fazer:
[1] Somar
[2] Multiplicar
[3] Maior Valor
[4] Novos Números
[5] Sair do Programa''')
print('-=-' * 10)
while opcao != 5:
opcao = int(input('>>>>>> Qual é a sua opção? '))
if opcao == 1:
soma = n1 + n2
print('A soma entre os números resulta em {}'.format(soma))
elif opcao == 2:
multiplicar = n1 * n2
print('{} x {} é {}'.format(n1, n2, multiplicar))
elif opcao == 3:
print('O maior dos números é {}'.format(max(n1, n2)))
elif opcao == 4:
novo = int(input('Digite um novo valor: '))
novo1 = int(input('Digite um novo valor: '))
elif opcao == 5:
print('Fechando programa!')
sleep(3)
exit('Obrigado por utilizar o sistema Curso em Vídeo!!!')
else:
print('Opção inválida! Selecione uma das opções acima.')
| pt | 0.721995 | O que deseja fazer:
[1] Somar
[2] Multiplicar
[3] Maior Valor
[4] Novos Números
[5] Sair do Programa | 3.964844 | 4 |
examples/contact_area.py | Pandinosaurus/PyTouch | 149 | 6620006 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import pytouch
from pytouch.handlers import ImageHandler
from pytouch.sensors import DigitSensor
from pytouch.tasks import ContactArea
def extract_surface_contact():
    """Run the ContactArea task two ways: via PyTouch and standalone."""
    reference_path = "./path/to/img/"
    touch_path = "./path/to/img"

    reference = ImageHandler(reference_path).nparray
    touch = ImageHandler(touch_path).nparray
    touch_copy = touch.copy()

    # Default-configured ContactArea, driven through the PyTouch front end.
    pt = pytouch.PyTouch(DigitSensor, tasks=[ContactArea])
    major, minor = pt.ContactArea(touch, base=reference)
    print("Major Axis: {0}, minor axis: {1}".format(*major, *minor))
    ImageHandler.save("surface_contact_1.png", touch)

    # Custom-configured ContactArea task, called directly.
    task = ContactArea(base=reference, contour_threshold=10)
    major, minor = task(touch_copy)
    print("Major Axis: {0}, minor axis: {1}".format(*major, *minor))
    ImageHandler.save("surface_contact_2.png", touch_copy)
if __name__ == "__main__":
extract_surface_contact()
| # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import pytouch
from pytouch.handlers import ImageHandler
from pytouch.sensors import DigitSensor
from pytouch.tasks import ContactArea
def extract_surface_contact():
base_img_path = "./path/to/img/"
sample_img_path = "./path/to/img"
base_img = ImageHandler(base_img_path).nparray
sample_img = ImageHandler(sample_img_path).nparray
sample_img_2 = sample_img.copy()
# initialize with default configuration of ContactArea task
pt = pytouch.PyTouch(DigitSensor, tasks=[ContactArea])
major, minor = pt.ContactArea(sample_img, base=base_img)
print("Major Axis: {0}, minor axis: {1}".format(*major, *minor))
ImageHandler.save("surface_contact_1.png", sample_img)
# initialize with custom configuration of ContactArea task
contact_area = ContactArea(base=base_img, contour_threshold=10)
major, minor = contact_area(sample_img_2)
print("Major Axis: {0}, minor axis: {1}".format(*major, *minor))
ImageHandler.save("surface_contact_2.png", sample_img_2)
if __name__ == "__main__":
extract_surface_contact()
| en | 0.789855 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # initialize with default configuration of ContactArea task # initialize with custom configuration of ContactArea task | 2.774205 | 3 |
python/number_of_islands.py | babibo180918/leetcode | 0 | 6620007 | class UnionFind(object):
def __init__(self, size):
self.ids = [-1 for i in range(size)]
self.weights = [1 for i in range(size)]
def root(self, index):
while self.ids[index] != -1:
index = self.ids[index]
return index
def union(self, from_idx, to_idx):
root_to = self.root(to_idx)
root_from = self.root(from_idx)
if root_to != root_from:
if self.weights[root_to] > self.weights[root_from]:
self.ids[root_from] = root_to
self.weights[root_to] += self.weights[root_from]
else:
self.ids[root_to] = root_from
self.weights[root_from] += self.weights[root_to]
return True
return False
class Solution(object):
    """LeetCode 200 ("Number of Islands") solved with union-find."""

    def numIslands(self, grid):
        """
        :type grid: List[List[str]]
        :rtype: int

        Counts '1' cells and unions each land cell with its right and
        down land neighbours.  The answer is (#land cells) minus
        (#successful unions), i.e. the number of connected components.
        """
        total = 0
        union_count = 0
        rows = len(grid)
        if rows == 0: return 0
        cols = len(grid[0])
        if cols == 0: return 0
        uf = UnionFind(cols * rows)
        for i in range(rows):
            for j in range(cols):
                if grid[i][j] == '1':
                    total += 1
                    # Fix: the original also computed uf.root(...) here
                    # into an unused local, costing O(depth) per cell for
                    # nothing; that dead work has been removed.
                    # Only look right and down; left/up pairs were already
                    # handled when the earlier cell was visited.
                    if j < cols - 1 and grid[i][j + 1] == '1':
                        if uf.union(i * cols + j, i * cols + j + 1):
                            union_count += 1
                    if i < rows - 1 and grid[i + 1][j] == '1':
                        if uf.union(i * cols + j, (i + 1) * cols + j):
                            union_count += 1
        return total - union_count
# Ad-hoc smoke tests: each line prints True when numIslands returns the
# expected island count for the grid.
sol = Solution()
lst = [['1','1','1','1','0'],
       ['1','1','0','1','0'],
       ['1','1','0','0','0'],
       ['0','0','0','0','0']]
print(sol.numIslands(lst) == 1)  # one connected island
lst = [['1','1','0','0','0'],
       ['1','1','0','0','0'],
       ['0','0','1','0','0'],
       ['0','0','0','1','1']]
print(sol.numIslands(lst) == 3)  # three separate islands
lst = [['1'],
       ['1']]
print(sol.numIslands(lst) == 1)  # vertically adjacent cells join
lst = [['1'],
       ['0'],
       ['1']]
print(sol.numIslands(lst) == 2)  # water splits the column
lst = []
print(sol.numIslands(lst) == 0)  # empty grid has no islands
lst = [["1","1","1"],["0","1","0"],["1","1","1"]]
print(sol.numIslands(lst) == 1)  # H-shaped single island
| class UnionFind(object):
def __init__(self, size):
self.ids = [-1 for i in range(size)]
self.weights = [1 for i in range(size)]
def root(self, index):
while self.ids[index] != -1:
index = self.ids[index]
return index
def union(self, from_idx, to_idx):
root_to = self.root(to_idx)
root_from = self.root(from_idx)
if root_to != root_from:
if self.weights[root_to] > self.weights[root_from]:
self.ids[root_from] = root_to
self.weights[root_to] += self.weights[root_from]
else:
self.ids[root_to] = root_from
self.weights[root_from] += self.weights[root_to]
return True
return False
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
total = 0
union_count = 0
rows = len(grid)
if rows == 0: return 0
cols = len(grid[0])
if cols == 0: return 0
uf = UnionFind(cols * rows)
for i in range(rows):
for j in range(cols):
if grid[i][j] == '1':
total += 1
cur_root = uf.root(i * cols + j)
if j < cols - 1 and grid[i][j + 1] == '1':
if uf.union(i * cols + j, i * cols + j + 1):
union_count += 1
if i < rows - 1 and grid[i + 1][j] == '1':
if uf.union(i * cols + j, (i + 1) * cols + j):
union_count += 1
return total - union_count
sol = Solution()
lst = [['1','1','1','1','0'],
['1','1','0','1','0'],
['1','1','0','0','0'],
['0','0','0','0','0']]
print(sol.numIslands(lst) == 1)
lst = [['1','1','0','0','0'],
['1','1','0','0','0'],
['0','0','1','0','0'],
['0','0','0','1','1']]
print(sol.numIslands(lst) == 3)
lst = [['1'],
['1']]
print(sol.numIslands(lst) == 1)
lst = [['1'],
['0'],
['1']]
print(sol.numIslands(lst) == 2)
lst = []
print(sol.numIslands(lst) == 0)
lst = [["1","1","1"],["0","1","0"],["1","1","1"]]
print(sol.numIslands(lst) == 1)
| en | 0.285332 | :type grid: List[List[str]] :rtype: int | 3.22819 | 3 |
suit_tool/get_pubkey.py | bergzand/suit-manifest-generator | 16 | 6620008 | <filename>suit_tool/get_pubkey.py
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright 2020 ARM Limited or its affiliates
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import textwrap
import binascii
import pyhsslms
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric import utils as asymmetric_utils
from cryptography.hazmat.primitives import serialization as ks
def to_uecc_pubkey(pk):
    """Render an EC key's public point as a micro-ecc style C array.

    The X and Y coordinates are emitted big-endian, each padded to the
    curve's full byte width, inside a `const uint8_t public_key[]`
    definition.

    Raises:
        Exception: if *pk* is not an elliptic-curve private key.
    """
    if not isinstance(pk, ec.EllipticCurvePrivateKey):
        raise Exception('Private key of type {} is not supported'.format(type(pk)))
    public_numbers = pk.public_key().public_numbers()
    x = public_numbers.x
    y = public_numbers.y
    # Bug fix: sizing each coordinate by bit_length() dropped leading
    # zero bytes (roughly 1 key in 256 per coordinate), producing a
    # short, misaligned key.  Pad to the curve's fixed byte length.
    coord_len = (pk.curve.key_size + 7) // 8
    uecc_bytes = x.to_bytes(coord_len, byteorder='big') + y.to_bytes(coord_len, byteorder='big')
    uecc_c_def = ['const uint8_t public_key[] = {'] + textwrap.wrap(
        ', '.join(['{:0=#4x}'.format(b) for b in uecc_bytes]),
        76
    )
    return '\n '.join(uecc_c_def) + '\n};\n'
# Maps each supported output-format name to a renderer that turns a
# loaded private key into the serialized form of its *public* half.
OutputFormaters = {
    # Raw uncompressed EC point as a C array (micro-ecc / uECC firmware).
    'uecc' : to_uecc_pubkey,
    # Standard SubjectPublicKeyInfo encodings.
    'pem' : lambda pk: pk.public_key().public_bytes(ks.Encoding.PEM, ks.PublicFormat.SubjectPublicKeyInfo),
    'der' : lambda pk: pk.public_key().public_bytes(ks.Encoding.DER, ks.PublicFormat.SubjectPublicKeyInfo),
    # Serialized HSS/LMS (hash-based signature) public key bytes.
    'hsslms' : lambda pk: pk.publicKey().serialize(),
    # The same HSS/LMS key rendered as a UTF-8 encoded C byte-array definition.
    'c-hsslms' : lambda pk: ('\n '.join(['const uint8_t hsslms_public_key[] = {'] + textwrap.wrap(
        ', '.join(['{:0=#4x}'.format(x) for x in pk.publicKey().serialize()]),
        76
    )) + '\n};\n').encode('utf-8')
}
def main(options):
    """Load a private key per *options* and write its public half out.

    *options* is an argparse-style namespace providing private_key (an
    open file), output_format (one of OutputFormaters' keys) and
    output_file.  Returns 0 on success.
    """
    private_key = None
    # This test is here because the cryptography module doesn't know about hss-lms keys
    if options.output_format in ('pem', 'der', 'uecc'):
        private_key = ks.load_pem_private_key(
            options.private_key.read(),
            # NOTE(review): "<PASSWORD>" is a dataset anonymization
            # placeholder (not valid Python); the original argument was
            # presumably None or a passphrase value -- restore before use.
            password=<PASSWORD>,
            backend=default_backend()
        )
    elif options.output_format in ('c-hsslms', 'hsslms'):
        private_key = pyhsslms.HssPrivateKey.deserialize(options.private_key.read())

    # Render the public half in the requested format.
    odata = OutputFormaters.get(options.output_format)(private_key)
    if options.output_file.isatty():
        # Writing to a terminal: decode to text (hex-dumping raw bytes
        # that are not UTF-8) and hard-wrap every line at 80 columns.
        try:
            odata = odata.decode('utf-8')
        except:
            odata = binascii.b2a_hex(odata).decode('utf-8')
        odata = '\n'.join(
            [line for lines in [textwrap.wrap(line, 80)
                for line in odata.split('\n')] for line in lines]
        ) + '\n'
    options.output_file.write(odata)
    return 0
| <filename>suit_tool/get_pubkey.py
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright 2020 ARM Limited or its affiliates
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import textwrap
import binascii
import pyhsslms
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric import utils as asymmetric_utils
from cryptography.hazmat.primitives import serialization as ks
def to_uecc_pubkey(pk):
    """Return a C array definition (micro-ecc style) of pk's public key.

    The key must be an EC private key; raises for any other key type.
    """
    if not isinstance(pk, ec.EllipticCurvePrivateKey):
        raise Exception('Private key of type {} is not supported'.format(type(pk)))
    public_numbers = pk.public_key().public_numbers()
    x = public_numbers.x
    y = public_numbers.y
    # Fix: serialize each coordinate at the curve's fixed width. Using
    # x.bit_length() dropped leading zero bytes (~0.4% of keys), producing a
    # short, misaligned key blob.
    nbytes = (pk.curve.key_size + 7) // 8
    uecc_bytes = x.to_bytes(nbytes, byteorder='big') + y.to_bytes(nbytes, byteorder='big')
    uecc_c_def = ['const uint8_t public_key[] = {'] + textwrap.wrap(
        ', '.join(['{:0=#4x}'.format(x) for x in uecc_bytes]),
        76
    )
    return '\n '.join(uecc_c_def) + '\n};\n'
def _pem_pubkey(pk):
    # SubjectPublicKeyInfo, PEM armoured (bytes).
    return pk.public_key().public_bytes(ks.Encoding.PEM, ks.PublicFormat.SubjectPublicKeyInfo)


def _der_pubkey(pk):
    # SubjectPublicKeyInfo, raw DER (bytes).
    return pk.public_key().public_bytes(ks.Encoding.DER, ks.PublicFormat.SubjectPublicKeyInfo)


def _hsslms_pubkey(pk):
    # Serialized HSS/LMS public key (bytes).
    return pk.publicKey().serialize()


def _c_hsslms_pubkey(pk):
    # HSS/LMS public key rendered as a C byte-array definition, UTF-8 encoded.
    rows = ['const uint8_t hsslms_public_key[] = {'] + textwrap.wrap(
        ', '.join(['{:0=#4x}'.format(x) for x in pk.publicKey().serialize()]),
        76
    )
    return ('\n '.join(rows) + '\n};\n').encode('utf-8')


# Maps the CLI output-format name to a formatter taking the private key.
OutputFormaters = {
    'uecc': to_uecc_pubkey,
    'pem': _pem_pubkey,
    'der': _der_pubkey,
    'hsslms': _hsslms_pubkey,
    'c-hsslms': _c_hsslms_pubkey,
}
def main(options):
    """Read the private key from ``options.private_key``, format its public
    half with the formatter selected by ``options.output_format``, and write
    it to ``options.output_file``.

    Returns 0 on success.
    """
    private_key = None
    # This test is here because the cryptography module doesn't know about hss-lms keys
    if options.output_format in ('pem', 'der', 'uecc'):
        private_key = ks.load_pem_private_key(
            options.private_key.read(),
            # Was a redacted placeholder (`<PASSWORD>`), which is not valid
            # Python. Unencrypted PEM keys take None — TODO confirm the key
            # files are indeed unencrypted.
            password=None,
            backend=default_backend()
        )
    elif options.output_format in ('c-hsslms', 'hsslms'):
        private_key = pyhsslms.HssPrivateKey.deserialize(options.private_key.read())
    odata = OutputFormaters.get(options.output_format)(private_key)
    if options.output_file.isatty():
        # A terminal gets printable text: decode UTF-8 where possible,
        # hex-encode binary data, then wrap to 80 columns.
        # Fix: some formatters (uecc) already return str; the old bare
        # `except:` caught the resulting AttributeError and then crashed with
        # TypeError in b2a_hex(str). Only bytes need converting.
        if isinstance(odata, bytes):
            try:
                odata = odata.decode('utf-8')
            except UnicodeDecodeError:
                odata = binascii.b2a_hex(odata).decode('utf-8')
        odata = '\n'.join(
            [line for lines in [textwrap.wrap(line, 80)
            for line in odata.split('\n')] for line in lines]
        ) + '\n'
    options.output_file.write(odata)
    return 0
tests/test_cli.py | desbma/raiseorlaunch | 37 | 6620009 | <reponame>desbma/raiseorlaunch
import sys
from argparse import ArgumentParser, ArgumentTypeError, Namespace
import pytest
from raiseorlaunch import Raiseorlaunch, __main__ as main
def test_arguments_all(default_args_cli, mocker):
    """Every CLI flag must land in the matching Namespace field."""
    cli = (
        "--class Coolapp --instance instance --title title --workspace CA "
        "--event-time-limit 7 --ignore-case --cycle --leave-fullscreen --debug"
    ).split()
    default_args_cli.update(
        dict(
            wm_class="Coolapp",
            wm_instance="instance",
            wm_title="title",
            command="coolapp",
            workspace="CA",
            event_time_limit=7.0,
            ignore_case=True,
            cycle=True,
            leave_fullscreen=True,
            debug=True,
        )
    )
    # Pretend the command is a real executable so parsing doesn't error out.
    mocker.patch.object(main.spawn, "find_executable", return_value=True)
    parsed = main.parse_arguments(cli)[0]
    assert parsed == Namespace(**default_args_cli)
def test_verify_app(mocker):
    """verify_app returns its input unchanged while reporting a parser error."""
    mocker.patch.object(ArgumentParser, "error")
    parser = ArgumentParser()
    result = main.verify_app(parser, "not an executable")
    assert result == "not an executable"
    ArgumentParser.error.assert_called_with(
        '"not an executable" is not an executable! Did you forget to supply -e?'
    )
def test_set_command_provided(mocker, default_args_cli):
    """An explicitly provided command is passed through untouched."""
    default_args_cli.update({"command": "ls"})
    ns = Namespace(**default_args_cli)
    assert main.set_command(ArgumentParser(), ns) == ns
def test_set_command_no_executable(mocker, default_args_cli):
    """Without an executable, set_command must surface a parser error."""
    ns = Namespace(**default_args_cli)
    mocker.patch.object(ArgumentParser, "error", side_effect=Exception("mocked error"))
    with pytest.raises(Exception) as err:
        main.set_command(ArgumentParser(), ns)
    assert str(err.value) == "mocked error"
    ArgumentParser.error.assert_called_with("No executable provided!")
def test_check_time_limit():
    """check_time_limit coerces numeric strings to float and rejects junk."""
    for raw, expected in (("3.0", 3.0), ("5", 5.0), ("13.56", 13.56)):
        assert main.check_time_limit(raw) == expected
    with pytest.raises(ArgumentTypeError) as err:
        main.check_time_limit("not a float")
    assert str(err.value) == "event-time-limit is not a positive integer or float!"
def test_main(mocker, sys_argv_handler):
    """main() wires parsed CLI arguments into Raiseorlaunch and runs it."""
    mocker.patch.object(main.spawn, "find_executable", return_value=True)
    mocker.patch.object(Raiseorlaunch, "__init__", return_value=None)
    mocker.patch.object(Raiseorlaunch, "run")
    sys.argv = ["__main__.py", "-c", "coolapp", "-d"]
    main.main()
def test_main_exception(mocker, sys_argv_handler):
    """With no CLI arguments at all, main() exits via argparse with status 2."""
    mocker.patch(
        "raiseorlaunch.__main__.set_command",
        side_effect=lambda parser, args: args,
    )
    sys.argv = ["__main__.py"]
    with pytest.raises(SystemExit) as err:
        main.main()
    assert err.type == SystemExit
    assert str(err.value) == "2"
| import sys
from argparse import ArgumentParser, ArgumentTypeError, Namespace
import pytest
from raiseorlaunch import Raiseorlaunch, __main__ as main
def test_arguments_all(default_args_cli, mocker):
    """Every CLI flag must land in the matching Namespace field."""
    initial_args = [
        "--class",
        "Coolapp",
        "--instance",
        "instance",
        "--title",
        "title",
        "--workspace",
        "CA",
        "--event-time-limit",
        "7",
        "--ignore-case",
        "--cycle",
        "--leave-fullscreen",
        "--debug",
    ]
    default_args_cli.update(
        {
            "wm_class": "Coolapp",
            "wm_instance": "instance",
            "wm_title": "title",
            "command": "coolapp",
            "workspace": "CA",
            "event_time_limit": 7.0,
            "ignore_case": True,
            "cycle": True,
            "leave_fullscreen": True,
            "debug": True,
        }
    )
    # Pretend the command is a real executable so parsing doesn't error out.
    mocker.patch.object(main.spawn, "find_executable", return_value=True)
    expected_args = Namespace(**default_args_cli)
    args = main.parse_arguments(initial_args)[0]
    assert args == expected_args
def test_verify_app(mocker):
    """verify_app returns its input unchanged while reporting a parser error."""
    mocker.patch.object(ArgumentParser, "error")
    assert main.verify_app(ArgumentParser(), "not an executable") == "not an executable"
    ArgumentParser.error.assert_called_with(
        '"not an executable" is not an executable! Did you forget to supply -e?'
    )
def test_set_command_provided(mocker, default_args_cli):
    """An explicitly provided command is passed through untouched."""
    default_args_cli.update({"command": "ls"})
    args = Namespace(**default_args_cli)
    assert main.set_command(ArgumentParser(), args) == args
def test_set_command_no_executable(mocker, default_args_cli):
    """Without an executable, set_command must surface a parser error."""
    args = Namespace(**default_args_cli)
    mocker.patch.object(ArgumentParser, "error", side_effect=Exception("mocked error"))
    with pytest.raises(Exception) as excinfo:
        main.set_command(ArgumentParser(), args)
    assert str(excinfo.value) == "mocked error"
    ArgumentParser.error.assert_called_with("No executable provided!")
def test_check_time_limit():
    """check_time_limit coerces numeric strings to float and rejects junk."""
    assert main.check_time_limit("3.0") == 3.0
    assert main.check_time_limit("5") == 5.0
    assert main.check_time_limit("13.56") == 13.56
    with pytest.raises(ArgumentTypeError) as excinfo:
        main.check_time_limit("not a float")
    assert str(excinfo.value) == "event-time-limit is not a positive integer or float!"
def test_main(mocker, sys_argv_handler):
    """main() wires parsed CLI arguments into Raiseorlaunch and runs it."""
    mocker.patch.object(main.spawn, "find_executable", return_value=True)
    mocker.patch.object(Raiseorlaunch, "__init__")
    mocker.patch.object(Raiseorlaunch, "run")
    # __init__ must return None or instantiation itself raises TypeError.
    Raiseorlaunch.__init__.return_value = None
    sys.argv = ["__main__.py", "-c", "coolapp", "-d"]
    main.main()
def test_main_exception(mocker, sys_argv_handler):
    """With no CLI arguments at all, main() exits via argparse with status 2."""
    def side_effect(parser, args):
        return args
    # Bypass set_command so argparse's own missing-argument error fires.
    mocker.patch("raiseorlaunch.__main__.set_command", side_effect=side_effect)
    sys.argv = ["__main__.py"]
    with pytest.raises(SystemExit) as excinfo:
        main.main()
    assert excinfo.type == SystemExit
    assert str(excinfo.value) == "2"
tests/test_list.py | Resi-Coders/cv | 2 | 6620010 | <filename>tests/test_list.py
from easycv import List
from easycv.transforms import GrayScale, Blur
# Shared fixtures: one eagerly-loaded and one lazily-loaded 2-element random List.
testlist = List.random(2)
lazy_test_list = List.random(2, lazy=True)
def test_random():
    """List.random(n) yields a list with exactly n elements."""
    assert len(testlist.copy()) == 2
def test_start_shutdown():
    """Starting and then shutting the worker pool down must not raise."""
    List.start()
    List.shutdown()
def test_index():
    """Slicing a List returns a List of the sliced length."""
    sliced = testlist.copy()[:1]
    assert len(sliced) == 1
def test_apply():
    """apply() preserves length and copies stay independent objects."""
    gray = testlist.copy().apply(GrayScale())
    assert len(gray) == 2
    blurred = gray.copy()
    blurred.apply(Blur(), in_place=True)
    assert id(gray) != id(blurred)
def test_compute():
    """Lazy lists defer transforms until compute() resolves them."""
    lazy = lazy_test_list.copy()
    assert not lazy[0].loaded
    lazy.apply(Blur(), in_place=True)
    # Still lazy: the transform is queued, not executed.
    assert not lazy[0].loaded
    assert lazy[0].pending.num_transforms() == 1
    lazy.compute(in_place=True)
    assert lazy[0].pending.num_transforms() == 0
    assert len(lazy) == 2
# NOTE(review): this function lacks the `test_` prefix, so pytest never
# collects it — presumably it should be `test_parallel`; confirm before renaming.
def parallel():
    """Mirror of test_apply/test_compute with parallel=True execution."""
    test_list = testlist.copy()
    t = test_list.apply(GrayScale(), parallel=True)
    assert len(t) == 2
    t2 = t.copy()
    t2.apply(Blur(), in_place=True, parallel=True)
    assert id(t) != id(t2)
    test_list = lazy_test_list.copy()
    assert not test_list[0].loaded
    test_list.apply(Blur(), in_place=True, parallel=True)
    # Still lazy: the transform is queued, not executed.
    assert not test_list[0].loaded
    assert test_list[0].pending.num_transforms() == 1
    test_list.compute(in_place=True, parallel=True)
    assert test_list[0].pending.num_transforms() == 0
    assert len(test_list) == 2
    List.shutdown()
| <filename>tests/test_list.py
from easycv import List
from easycv.transforms import GrayScale, Blur
# Shared fixtures: one eagerly-loaded and one lazily-loaded 2-element random List.
testlist = List.random(2)
lazy_test_list = List.random(2, lazy=True)
def test_random():
    """List.random(n) yields a list with exactly n elements."""
    test_list = testlist.copy()
    assert len(test_list) == 2
def test_start_shutdown():
    """Starting and then shutting the worker pool down must not raise."""
    List.start()
    List.shutdown()
def test_index():
    """Slicing a List returns a List of the sliced length."""
    test_list = testlist.copy()
    assert len(test_list[:1]) == 1
def test_apply():
    """apply() preserves length and copies stay independent objects."""
    test_list = testlist.copy()
    t = test_list.apply(GrayScale())
    assert len(t) == 2
    t2 = t.copy()
    t2.apply(Blur(), in_place=True)
    assert id(t) != id(t2)
def test_compute():
    """Lazy lists defer transforms until compute() resolves them."""
    test_list = lazy_test_list.copy()
    assert not test_list[0].loaded
    test_list.apply(Blur(), in_place=True)
    # Still lazy: the transform is queued, not executed.
    assert not test_list[0].loaded
    assert test_list[0].pending.num_transforms() == 1
    test_list.compute(in_place=True)
    assert test_list[0].pending.num_transforms() == 0
    assert len(test_list) == 2
# NOTE(review): this function lacks the `test_` prefix, so pytest never
# collects it — presumably it should be `test_parallel`; confirm before renaming.
def parallel():
    """Mirror of test_apply/test_compute with parallel=True execution."""
    test_list = testlist.copy()
    t = test_list.apply(GrayScale(), parallel=True)
    assert len(t) == 2
    t2 = t.copy()
    t2.apply(Blur(), in_place=True, parallel=True)
    assert id(t) != id(t2)
    test_list = lazy_test_list.copy()
    assert not test_list[0].loaded
    test_list.apply(Blur(), in_place=True, parallel=True)
    # Still lazy: the transform is queued, not executed.
    assert not test_list[0].loaded
    assert test_list[0].pending.num_transforms() == 1
    test_list.compute(in_place=True, parallel=True)
    assert test_list[0].pending.num_transforms() == 0
    assert len(test_list) == 2
    List.shutdown()
| none | 1 | 2.355122 | 2 | |
utils/data/base.py | shogi880/lossyless | 61 | 6620011 | import abc
from pathlib import Path
import numpy as np
import torch
from lossyless.helpers import to_numpy
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
# Repository-level data directory: two levels above this file, in `data/`.
DIR = Path(__file__).parents[2].joinpath("data")

__all__ = ["LossylessDataset", "LossylessDataModule"]
### Base Dataset ###
class LossylessDataset(abc.ABC):
    """Base class for lossy compression but lossless predictions.

    Parameters
    -----------
    additional_target : {"input", "representative", "equiv_x", "target", None}, optional
        Additional target to append to the target. `"input"` is the input example (i.e. augmented),
        `"representative"` is a representative of the equivalence class (always the same).
        `"equiv_x"` is some random equivalent x. "target" uses again the target (i.e. duplicate).

    equivalence : str or set of str, optional
        Equivalence relationship with respect to which to be invariant. Depends on the dataset.
        `None` means no equivalence.

    is_normalize : bool, optional
        Whether to normalize the data.

    seed : int, optional
        Pseudo random seed.
    """

    def __init__(
        self,
        *args,
        additional_target=None,
        equivalence=None,
        is_normalize=False,
        seed=123,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.additional_target = additional_target
        self.equivalence = equivalence
        self.seed = seed
        self.is_normalize = is_normalize

    @abc.abstractmethod
    def get_x_target_Mx(self, index):
        """Return the correct example, target, and maximal invariant."""
        ...

    @abc.abstractmethod
    def get_representative(self, Mx):
        """Return a representative element for current Mx."""
        ...

    @abc.abstractmethod
    def get_equiv_x(self, x, Mx):
        """Return some other random element from same equivalence class."""
        ...

    @property
    @abc.abstractmethod
    def is_clfs(self):
        """Return a dictionary saying whether `input`, `target`, should be classified."""
        ...

    @property
    @abc.abstractmethod
    def shapes(self):
        """Return dictionary giving the shape `input`, `target`."""
        ...

    def __getitem__(self, index):
        x, target, Mx = self.get_x_target_Mx(index)

        if self.additional_target is None:
            targets = target
        else:
            targets = [target]
            targets += self.toadd_target(self.additional_target, x, target, Mx)

        return x, targets

    def toadd_target(self, additional_target, x, target, Mx):
        """Return the list of additional target(s) to append for this example."""
        if additional_target == "input":
            to_add = [x]
        elif additional_target == "representative":
            # representative element from same equivalence class
            to_add = [self.get_representative(Mx)]
        elif additional_target == "equiv_x":
            # other element from same equivalence class
            to_add = [self.get_equiv_x(x, Mx)]
        elif additional_target == "target":
            # duplicate but makes code simpler
            to_add = [target]
        else:
            raise ValueError(f"Unkown additional_target={additional_target}")
        return to_add

    def get_is_clf(self):
        """Return `is_clf` for the target and aux_target."""
        # Copy before augmenting: `is_clfs` may return a dict stored on the
        # subclass, and mutating it in place would leak the derived keys back
        # into the subclass's state.
        is_clf = dict(self.is_clfs)
        is_clf["representative"] = is_clf["input"]
        is_clf["equiv_x"] = is_clf["input"]
        is_clf[None] = None
        return is_clf["target"], is_clf[self.additional_target]

    def get_shapes(self):
        """Return `shapes` for the target and aux_target."""
        # Same copy rationale as get_is_clf.
        shapes = dict(self.shapes)
        shapes["representative"] = shapes["input"]
        shapes["equiv_x"] = shapes["input"]
        shapes[None] = None
        return shapes["target"], shapes[self.additional_target]
### Base Datamodule ###
# cannot use abc because inheriting from lightning :( )
class LossylessDataModule(LightningDataModule):
    """Base class for data module for lossy compression but lossless predictions.

    Notes
    -----
    - similar to pl_bolts.datamodule.CIFAR10DataModule but more easily modifiable.

    Parameters
    -----------
    data_dir : str, optional
        Directory for saving/loading the dataset.

    val_size : int or float, optional
        How many examples to use for validation. This will generate new examples if possible, or
        split from the training set. If float this is in ratio of training size, eg 0.1 is 10%.

    test_size : int, optional
        How many examples to use for test. `None` means all.

    num_workers : int, optional
        How many workers to use for loading data

    batch_size : int, optional
        Number of example per batch for training.

    val_batch_size : int or None, optional
        Number of example per batch during eval and test. If None uses `batch_size`.

    seed : int, optional
        Pseudo random seed.

    reload_dataloaders_every_epoch : bool, optional
        Whether to reload (all) dataloaders at each epoch.

    dataset_kwargs : dict, optional
        Additional arguments for the dataset.
    """

    def __init__(
        self,
        data_dir=DIR,
        val_size=0.1,
        test_size=None,
        num_workers=16,
        batch_size=128,
        val_batch_size=None,
        seed=123,
        reload_dataloaders_every_epoch=False,
        dataset_kwargs=None,
    ):
        super().__init__()
        self.data_dir = data_dir
        self.val_size = val_size
        self.test_size = test_size
        self.num_workers = num_workers
        self.batch_size = batch_size
        self.val_batch_size = batch_size if val_batch_size is None else val_batch_size
        self.seed = seed
        # Fix: the previous `dataset_kwargs={}` mutable default was shared
        # across every instance that did not pass its own dict.
        self.dataset_kwargs = {} if dataset_kwargs is None else dataset_kwargs
        self.reload_dataloaders_every_epoch = reload_dataloaders_every_epoch

    @property
    def Dataset(self):
        """Return the correct dataset."""
        raise NotImplementedError()

    def get_train_dataset(self, **dataset_kwargs):
        """Return the training dataset."""
        raise NotImplementedError()

    def get_val_dataset(self, **dataset_kwargs):
        """Return the validation dataset."""
        raise NotImplementedError()

    def get_test_dataset(self, **dataset_kwargs):
        """Return the test dataset."""
        raise NotImplementedError()

    def prepare_data(self):
        """Download and save data on file if needed."""
        raise NotImplementedError()

    @property
    def mode(self):
        """Says what is the mode/type of data. E.g. images, distributions, ...."""
        raise NotImplementedError()

    @property
    def dataset(self):
        """Return the underlying (train) dataset ...."""
        dataset = self.train_dataset
        # Unwrap torch Subset so callers see the real dataset object.
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
        return dataset

    def set_info_(self):
        """Sets some information from the dataset."""
        dataset = self.dataset
        self.target_is_clf, self.aux_is_clf = dataset.get_is_clf()
        self.target_shape, self.aux_shape = dataset.get_shapes()
        self.shape = dataset.shapes["input"]
        self.additional_target = dataset.additional_target

    @property
    def balancing_weights(self):
        """Dictionary mapping every target to a weight that examples from this class should carry."""
        return dict()

    def setup(self, stage=None):
        """Prepare the datasets for the current stage."""
        if stage == "fit" or stage is None:
            self.train_dataset = self.get_train_dataset(**self.dataset_kwargs)
            self.set_info_()
            self.val_dataset = self.get_val_dataset(**self.dataset_kwargs)

        if stage == "test" or stage is None:
            self.test_dataset = self.get_test_dataset(**self.dataset_kwargs)

    def train_dataloader(self, batch_size=None, train_dataset=None, **kwargs):
        """Return the training dataloader while possibly modifying dataset kwargs."""
        dkwargs = kwargs.pop("dataset_kwargs", {})
        # Rebuild the dataset if kwargs changed or per-epoch reload is on.
        if self.reload_dataloaders_every_epoch or len(dkwargs) > 0:
            curr_kwargs = dict(self.dataset_kwargs, **dkwargs)
            train_dataset = self.get_train_dataset(**curr_kwargs)

        if train_dataset is None:
            train_dataset = self.train_dataset

        if batch_size is None:
            batch_size = self.batch_size

        return DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=self.num_workers,
            pin_memory=True,
            **kwargs,
        )

    def val_dataloader(self, batch_size=None, **kwargs):
        """Return the validation dataloader while possibly modifying dataset kwargs."""
        dkwargs = kwargs.pop("dataset_kwargs", {})
        if self.reload_dataloaders_every_epoch or len(dkwargs) > 0:
            curr_kwargs = dict(self.dataset_kwargs, **dkwargs)
            self.val_dataset = self.get_val_dataset(**curr_kwargs)

        if batch_size is None:
            batch_size = self.val_batch_size

        return DataLoader(
            self.val_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=True,
            **kwargs,
        )

    def test_dataloader(self, batch_size=None, **kwargs):
        """Return the test dataloader while possibly modifying dataset kwargs."""
        dkwargs = kwargs.pop("dataset_kwargs", {})
        if self.reload_dataloaders_every_epoch or len(dkwargs) > 0:
            curr_kwargs = dict(self.dataset_kwargs, **dkwargs)
            self.test_dataset = self.get_test_dataset(**curr_kwargs)

        if batch_size is None:
            batch_size = self.val_batch_size

        return DataLoader(
            self.test_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=True,
            **kwargs,
        )

    def eval_dataloader(self, is_eval_on_test, **kwargs):
        """Return the evaluation dataloader (test or val)."""
        if is_eval_on_test:
            return self.test_dataloader(**kwargs)
        else:
            return self.val_dataloader(**kwargs)
| import abc
from pathlib import Path
import numpy as np
import torch
from lossyless.helpers import to_numpy
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
# Repository-level data directory: two levels above this file, in `data/`.
DIR = Path(__file__).parents[2].joinpath("data")

__all__ = ["LossylessDataset", "LossylessDataModule"]
### Base Dataset ###
class LossylessDataset(abc.ABC):
    """Base class for lossy compression but lossless predictions.

    Parameters
    -----------
    additional_target : {"input", "representative", "equiv_x", "target", None}, optional
        Additional target to append to the target. `"input"` is the input example (i.e. augmented),
        `"representative"` is a representative of the equivalence class (always the same).
        `"equiv_x"` is some random equivalent x. "target" uses again the target (i.e. duplicate).

    equivalence : str or set of str, optional
        Equivalence relationship with respect to which to be invariant. Depends on the dataset.
        `None` means no equivalence.

    is_normalize : bool, optional
        Whether to normalize the data.

    seed : int, optional
        Pseudo random seed.
    """

    def __init__(
        self,
        *args,
        additional_target=None,
        equivalence=None,
        is_normalize=False,
        seed=123,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.additional_target = additional_target
        self.equivalence = equivalence
        self.seed = seed
        self.is_normalize = is_normalize

    @abc.abstractmethod
    def get_x_target_Mx(self, index):
        """Return the correct example, target, and maximal invariant."""
        ...

    @abc.abstractmethod
    def get_representative(self, Mx):
        """Return a representative element for current Mx."""
        ...

    @abc.abstractmethod
    def get_equiv_x(self, x, Mx):
        """Return some other random element from same equivalence class."""
        ...

    @property
    @abc.abstractmethod
    def is_clfs(self):
        """Return a dictionary saying whether `input`, `target`, should be classified."""
        ...

    @property
    @abc.abstractmethod
    def shapes(self):
        """Return dictionary giving the shape `input`, `target`."""
        ...

    def __getitem__(self, index):
        x, target, Mx = self.get_x_target_Mx(index)

        if self.additional_target is None:
            targets = target
        else:
            targets = [target]
            targets += self.toadd_target(self.additional_target, x, target, Mx)

        return x, targets

    def toadd_target(self, additional_target, x, target, Mx):
        """Return the list of additional target(s) to append for this example."""
        if additional_target == "input":
            to_add = [x]
        elif additional_target == "representative":
            # representative element from same equivalence class
            to_add = [self.get_representative(Mx)]
        elif additional_target == "equiv_x":
            # other element from same equivalence class
            to_add = [self.get_equiv_x(x, Mx)]
        elif additional_target == "target":
            # duplicate but makes code simpler
            to_add = [target]
        else:
            raise ValueError(f"Unkown additional_target={additional_target}")
        return to_add

    def get_is_clf(self):
        """Return `is_clf` for the target and aux_target."""
        # NOTE(review): augments the dict returned by the `is_clfs` property in
        # place; if a subclass returns a stored dict this mutates its state —
        # confirm that is intended.
        is_clf = self.is_clfs
        is_clf["representative"] = is_clf["input"]
        is_clf["equiv_x"] = is_clf["input"]
        is_clf[None] = None
        return is_clf["target"], is_clf[self.additional_target]

    def get_shapes(self):
        """Return `shapes` for the target and aux_target."""
        # NOTE(review): same in-place augmentation caveat as get_is_clf.
        shapes = self.shapes
        shapes["representative"] = shapes["input"]
        shapes["equiv_x"] = shapes["input"]
        shapes[None] = None
        return shapes["target"], shapes[self.additional_target]
### Base Datamodule ###
# cannot use abc because inheriting from lightning :( )
class LossylessDataModule(LightningDataModule):
    """Base class for data module for lossy compression but lossless predictions.

    Notes
    -----
    - similar to pl_bolts.datamodule.CIFAR10DataModule but more easily modifiable.

    Parameters
    -----------
    data_dir : str, optional
        Directory for saving/loading the dataset.

    val_size : int or float, optional
        How many examples to use for validation. This will generate new examples if possible, or
        split from the training set. If float this is in ratio of training size, eg 0.1 is 10%.

    test_size : int, optional
        How many examples to use for test. `None` means all.

    num_workers : int, optional
        How many workers to use for loading data

    batch_size : int, optional
        Number of example per batch for training.

    val_batch_size : int or None, optional
        Number of example per batch during eval and test. If None uses `batch_size`.

    seed : int, optional
        Pseudo random seed.

    reload_dataloaders_every_epoch : bool, optional
        Whether to reload (all) dataloaders at each epoch.

    dataset_kwargs : dict, optional
        Additional arguments for the dataset.
    """

    def __init__(
        self,
        data_dir=DIR,
        val_size=0.1,
        test_size=None,
        num_workers=16,
        batch_size=128,
        val_batch_size=None,
        seed=123,
        reload_dataloaders_every_epoch=False,
        # NOTE(review): mutable default — this dict is shared across all
        # instances that don't pass their own; consider a None sentinel.
        dataset_kwargs={},
    ):
        super().__init__()
        self.data_dir = data_dir
        self.val_size = val_size
        self.test_size = test_size
        self.num_workers = num_workers
        self.batch_size = batch_size
        self.val_batch_size = batch_size if val_batch_size is None else val_batch_size
        self.seed = seed
        self.dataset_kwargs = dataset_kwargs
        self.reload_dataloaders_every_epoch = reload_dataloaders_every_epoch

    @property
    def Dataset(self):
        """Return the correct dataset."""
        raise NotImplementedError()

    def get_train_dataset(self, **dataset_kwargs):
        """Return the training dataset."""
        raise NotImplementedError()

    def get_val_dataset(self, **dataset_kwargs):
        """Return the validation dataset."""
        raise NotImplementedError()

    def get_test_dataset(self, **dataset_kwargs):
        """Return the test dataset."""
        raise NotImplementedError()

    def prepare_data(self):
        """Download and save data on file if needed."""
        raise NotImplementedError()

    @property
    def mode(self):
        """Says what is the mode/type of data. E.g. images, distributions, ...."""
        raise NotImplementedError()

    @property
    def dataset(self):
        """Return the underlying (train) dataset ...."""
        dataset = self.train_dataset
        # Unwrap torch Subset so callers see the real dataset object.
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
        return dataset

    def set_info_(self):
        """Sets some information from the dataset."""
        dataset = self.dataset
        self.target_is_clf, self.aux_is_clf = dataset.get_is_clf()
        self.target_shape, self.aux_shape = dataset.get_shapes()
        self.shape = dataset.shapes["input"]
        self.additional_target = dataset.additional_target

    @property
    def balancing_weights(self):
        """Dictionary mapping every target to a weight that examples from this class should carry."""
        return dict()

    def setup(self, stage=None):
        """Prepare the datasets for the current stage."""
        if stage == "fit" or stage is None:
            self.train_dataset = self.get_train_dataset(**self.dataset_kwargs)
            self.set_info_()
            self.val_dataset = self.get_val_dataset(**self.dataset_kwargs)

        if stage == "test" or stage is None:
            self.test_dataset = self.get_test_dataset(**self.dataset_kwargs)

    def train_dataloader(self, batch_size=None, train_dataset=None, **kwargs):
        """Return the training dataloader while possibly modifying dataset kwargs."""
        dkwargs = kwargs.pop("dataset_kwargs", {})
        # Rebuild the dataset if kwargs changed or per-epoch reload is on.
        if self.reload_dataloaders_every_epoch or len(dkwargs) > 0:
            curr_kwargs = dict(self.dataset_kwargs, **dkwargs)
            train_dataset = self.get_train_dataset(**curr_kwargs)

        if train_dataset is None:
            train_dataset = self.train_dataset

        if batch_size is None:
            batch_size = self.batch_size

        return DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=self.num_workers,
            pin_memory=True,
            **kwargs,
        )

    def val_dataloader(self, batch_size=None, **kwargs):
        """Return the validation dataloader while possibly modifying dataset kwargs."""
        dkwargs = kwargs.pop("dataset_kwargs", {})
        if self.reload_dataloaders_every_epoch or len(dkwargs) > 0:
            curr_kwargs = dict(self.dataset_kwargs, **dkwargs)
            self.val_dataset = self.get_val_dataset(**curr_kwargs)

        if batch_size is None:
            batch_size = self.val_batch_size

        return DataLoader(
            self.val_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=True,
            **kwargs,
        )

    def test_dataloader(self, batch_size=None, **kwargs):
        """Return the test dataloader while possibly modifying dataset kwargs."""
        dkwargs = kwargs.pop("dataset_kwargs", {})
        if self.reload_dataloaders_every_epoch or len(dkwargs) > 0:
            curr_kwargs = dict(self.dataset_kwargs, **dkwargs)
            self.test_dataset = self.get_test_dataset(**curr_kwargs)

        if batch_size is None:
            batch_size = self.val_batch_size

        return DataLoader(
            self.test_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=True,
            **kwargs,
        )

    def eval_dataloader(self, is_eval_on_test, **kwargs):
        """Return the evaluation dataloader (test or val)."""
        if is_eval_on_test:
            return self.test_dataloader(**kwargs)
        else:
            return self.val_dataloader(**kwargs)
| en | 0.63976 | ### Base Dataset ### Base class for lossy compression but lossless predicitons. Parameters ----------- additional_target : {"input", "representative", "equiv_x", "target", None}, optional Additional target to append to the target. `"input"` is the input example (i.e. augmented), `"representative"` is a representative of the equivalence class (always the same). `"equiv_x"` is some random equivalent x. "target" uses agin the target (i.e. duplicate). equivalence : str or set of str, optional Equivalence relationship with respect to which to be invariant. Depends on the dataset. `None` means no equivalence. is_normalize : bool, optional Whether to normalize the data. seed : int, optional Pseudo random seed. Return the correct example, target, and maximal invariant. Return a representative element for current Mx. Return some other random element from same equivalence class. Return a dictionary saying whether `input`, `target`, should be classified. Return dictionary giving the shape `input`, `target`. # representative element from same equivalence class # other element from same equivalence class # duplicate but makes code simpler Return `is_clf` for the target and aux_target. Return `shapes` for the target and aux_target. ### Base Datamodule ### # cannot use abc because inheriting from lightning :( ) Base class for data module for lossy compression but lossless predicitons. Notes ----- - similar to pl_bolts.datamodule.CIFAR10DataModule but more easily modifiable. Parameters ----------- data_dir : str, optional Directory for saving/loading the dataset. val_size : int or float, optional How many examples to use for validation. This will generate new examples if possible, or split from the training set. If float this is in ratio of training size, eg 0.1 is 10%. test_size : int, optional How many examples to use for test. `None` means all. 
num_workers : int, optional How many workers to use for loading data batch_size : int, optional Number of example per batch for training. val_batch_size : int or None, optional Number of example per batch during eval and test. If None uses `batch_size`. seed : int, optional Pseudo random seed. reload_dataloaders_every_epoch : bool, optional Whether to reload (all) dataloaders at each epoch. dataset_kwargs : dict, optional Additional arguments for the dataset. Return the correct dataset. Return the training dataset. Return the validation dataset. Return the test dataset. Dowload and save data on file if needed. Says what is the mode/type of data. E.g. images, distributions, .... Return the underlying (train) datset .... Sets some information from the dataset. Dictionary mapping every target to a weight that examples from this class should carry. Prepare the datasets for the current stage. Return the training dataloader while possibly modifying dataset kwargs. Return the validation dataloader while possibly modifying dataset kwargs. Return the test dataloader while possibly modifying dataset kwargs. Return the evaluation dataloader (test or val). | 2.716174 | 3 |
test/dotest.py | notestaff/dockstore-tool-cms2 | 0 | 6620012 | import pandas as pd
# Quick inspection of the HDF5 component-stats output: dump schema/non-null
# counts for both stored frames (and their indexes), then summarise metadata.
with pd.HDFStore('default.all_component_stats.h5', mode='r') as store:
    for key in ('hapset_data', 'hapset_metadata'):
        frame = store[key]
        frame.info(verbose=True, show_counts=True)
        frame.index.to_frame().info(verbose=True, show_counts=True)
    # After the loop `frame` is the metadata table.
    print(frame.describe())
    print(frame)
    #print('\n'.join(hap.columns)+'\n')
import pandas as pd

# Read-only peek at the HDF5 store produced by the component-stats step.
with pd.HDFStore('default.all_component_stats.h5', mode='r') as store:
    hapset_data = store['hapset_data']
    # Schema + non-null counts for the data table and its (multi-)index.
    hapset_data.info(verbose=True, show_counts=True)
    hapset_data.index.to_frame().info(verbose=True, show_counts=True)
    hapset_metadata = store['hapset_metadata']
    hapset_metadata.info(verbose=True, show_counts=True)
    hapset_metadata.index.to_frame().info(verbose=True, show_counts=True)
    print(hapset_metadata.describe())
    print(hapset_metadata)
    #print('\n'.join(hap.columns)+'\n')
| bn | 0.083396 | #print('\n'.join(hap.columns)+'\n') | 2.288265 | 2 |
3rd_module/IX-Listas_I.py | PinillosDev/PythonBasics-es | 3 | 6620013 | """
Hasta ahora hemos visto cómo podemos guardar un valor en una variable.
Este tipo de variables donde solo se guarda un dato a la vez (ya sea int, str, etc) se llama escalar.
¿Te imaginas una tipo de dato donde puedas guardar más variables? Algo así como una variable donde
puedas guardar int, float, string y hasta otras variables.
Este tipo de dato existe y se llaman lista. Es como una variable, pero a la vez no, porque hace
cosas que una variable escalar no puede hacer. En resumen: las listas son un tipo de dato en la que puedes guardar
lo que quieras.
"""
# _______________________________________________________________________________
MyList = [123, 'string', 3.1416, 'a'] # Así se ven las listas
"""
Veamos su anatomía:
- MyList es el nombre
- [] Todo lo que va dentro de los corchetes son los elementos de la lista, los cuales
deben ir separador por comas.
"""
# _______________________________________________________________________________
"""
¿Cómo podemos ver, editar y eliminar elementos dentro de una lista?
Antes de todo, debemos entender el concepto de índice.
Un índice es la manera en la que identificamos un elemento dentro de una lista.
Los índices son números, que empiezan desde cero.
0 1 2 3
MyList = [123, 'string', 3.1415, 'a']
Y así sucesivamente con los demás elementos.
Cuando decimos el elemento #2, hacemos referencia a 3.1415.
"""
# _______________________________________________________________________________
# VER ELEMENTOS DE UNA LISTA
print(MyList[1]) # Con los índices podemos acceder a los elementos de una lista
# CAMBIAR ELEMENTOS DE UNA LISTA
MyList[0] = MyList[2] # Cambiar elementos por otros elementos de la misma lista
MyList[3] = 'b' # Cambiar elementos por otro dato que queramos
# ELIMINAR ELEMENTOS DE UNA LISTA
del MyList[2] # la instrucción 'del' es para eliminar un elemento de una lista con base en un índice
"""
NOTA:
- Cuando se dice 'espacio en memoria' se hace referencia al lugar donde en verdad se gurda algo.
Una variable es una representación de ese espacio en memoria.
- Si a una variable se le asigna el nombre de una lista, no hace una copia de una lista,
pero hace que las variable apunte a la misma lista en memoria. Es decir, si se le asigna
a una variable una lista, tanto la lista como la variable apuntarán al mismo espacio en memoria.
- El nombre de la lista es una representación de un espacio en memoria.
Es por eso mismo, que cuando haces variable = MyList y cambias el valor de 'variable', los cambios
también se verán en 'MyList', porque ambas son representaciones diferentes del mismo espacio en memoria.
"""
# _______________________________________________________________________________
# FUNCIONES VS MÉTODOS
"""
Ya hemos visto qué es una función y hemos trabajado con ellas. Pero ahora se introducirá un concepto
que es muy importante, y es el de los métodos.
Una función es una estructura de código que produce o modifica algo. Sin embargo,
una función puede ser aplicable a todo el código es decir, puedes aplicar print() a números,
cadenas y listas. ¿Existe una función que solo sea aplicable a datos tipo int, por ejemplo?
Los métodos son como las funciones, pero solo se pueden aplicar a ciertos tipo de datos. Es decir,
hay métodos para listas, métodos para cadenas, métodos para enteros, etc. No se puede aplicar un método
de lista en un tipo de dato numérico.
Los métodos son importantes porque nos permiten tener mayor control en el flujo de los tipos de datos
y el cómo se comportan estos en nuestro programa.
Un método tiene la siguiente estructura:
data.method(argumento)
data: Es la variable a manipular, debe ser un tipo de datos específico.
method: El nombre del método
"""
# Veamos un ejemplo de método para un tipo de dato string
# NOTA: Las cadenas también tienen índices
saludo = 'Holaaa'
print(saludo[2]) # Salida: 'l'
saludo.count('a') # El método count cuenta las veces que un argumento está en una variable
# salida: 3
# Para más información acerca de métodos para strings:
# https://controlautomaticoeducacion.com/python-desde-cero/metodos-string-en-python/
"""
Espero que te hayan quedado claro los temas vistos en este capítulo:
- Diferencia entre una variable escalar y una lista
- Cómo crear y manipular básicamente una lista
- Diferencia entre función y método
¡En el siguiente capítulo aprenderás más cosas acerca de las listas! ;)
""" | """
Hasta ahora hemos visto cómo podemos guardar un valor en una variable.
Este tipo de variables donde solo se guarda un dato a la vez (ya sea int, str, etc) se llama escalar.
¿Te imaginas una tipo de dato donde puedas guardar más variables? Algo así como una variable donde
puedas guardar int, float, string y hasta otras variables.
Este tipo de dato existe y se llaman lista. Es como una variable, pero a la vez no, porque hace
cosas que una variable escalar no puede hacer. En resumen: las listas son un tipo de dato en la que puedes guardar
lo que quieras.
"""
# _______________________________________________________________________________
MyList = [123, 'string', 3.1416, 'a'] # Así se ven las listas
"""
Veamos su anatomía:
- MyList es el nombre
- [] Todo lo que va dentro de los corchetes son los elementos de la lista, los cuales
deben ir separador por comas.
"""
# _______________________________________________________________________________
"""
¿Cómo podemos ver, editar y eliminar elementos dentro de una lista?
Antes de todo, debemos entender el concepto de índice.
Un índice es la manera en la que identificamos un elemento dentro de una lista.
Los índices son números, que empiezan desde cero.
0 1 2 3
MyList = [123, 'string', 3.1415, 'a']
Y así sucesivamente con los demás elementos.
Cuando decimos el elemento #2, hacemos referencia a 3.1415.
"""
# _______________________________________________________________________________
# VER ELEMENTOS DE UNA LISTA
print(MyList[1]) # Con los índices podemos acceder a los elementos de una lista
# CAMBIAR ELEMENTOS DE UNA LISTA
MyList[0] = MyList[2] # Cambiar elementos por otros elementos de la misma lista
MyList[3] = 'b' # Cambiar elementos por otro dato que queramos
# ELIMINAR ELEMENTOS DE UNA LISTA
del MyList[2] # la instrucción 'del' es para eliminar un elemento de una lista con base en un índice
"""
NOTA:
- Cuando se dice 'espacio en memoria' se hace referencia al lugar donde en verdad se gurda algo.
Una variable es una representación de ese espacio en memoria.
- Si a una variable se le asigna el nombre de una lista, no hace una copia de una lista,
pero hace que las variable apunte a la misma lista en memoria. Es decir, si se le asigna
a una variable una lista, tanto la lista como la variable apuntarán al mismo espacio en memoria.
- El nombre de la lista es una representación de un espacio en memoria.
Es por eso mismo, que cuando haces variable = MyList y cambias el valor de 'variable', los cambios
también se verán en 'MyList', porque ambas son representaciones diferentes del mismo espacio en memoria.
"""
# _______________________________________________________________________________
# FUNCIONES VS MÉTODOS
"""
Ya hemos visto qué es una función y hemos trabajado con ellas. Pero ahora se introducirá un concepto
que es muy importante, y es el de los métodos.
Una función es una estructura de código que produce o modifica algo. Sin embargo,
una función puede ser aplicable a todo el código es decir, puedes aplicar print() a números,
cadenas y listas. ¿Existe una función que solo sea aplicable a datos tipo int, por ejemplo?
Los métodos son como las funciones, pero solo se pueden aplicar a ciertos tipo de datos. Es decir,
hay métodos para listas, métodos para cadenas, métodos para enteros, etc. No se puede aplicar un método
de lista en un tipo de dato numérico.
Los métodos son importantes porque nos permiten tener mayor control en el flujo de los tipos de datos
y el cómo se comportan estos en nuestro programa.
Un método tiene la siguiente estructura:
data.method(argumento)
data: Es la variable a manipular, debe ser un tipo de datos específico.
method: El nombre del método
"""
# Veamos un ejemplo de método para un tipo de dato string
# NOTA: Las cadenas también tienen índices
saludo = 'Holaaa'
print(saludo[2]) # Salida: 'l'
saludo.count('a') # El método count cuenta las veces que un argumento está en una variable
# salida: 3
# Para más información acerca de métodos para strings:
# https://controlautomaticoeducacion.com/python-desde-cero/metodos-string-en-python/
"""
Espero que te hayan quedado claro los temas vistos en este capítulo:
- Diferencia entre una variable escalar y una lista
- Cómo crear y manipular básicamente una lista
- Diferencia entre función y método
¡En el siguiente capítulo aprenderás más cosas acerca de las listas! ;)
""" | es | 0.96417 | Hasta ahora hemos visto cómo podemos guardar un valor en una variable. Este tipo de variables donde solo se guarda un dato a la vez (ya sea int, str, etc) se llama escalar. ¿Te imaginas una tipo de dato donde puedas guardar más variables? Algo así como una variable donde puedas guardar int, float, string y hasta otras variables. Este tipo de dato existe y se llaman lista. Es como una variable, pero a la vez no, porque hace cosas que una variable escalar no puede hacer. En resumen: las listas son un tipo de dato en la que puedes guardar lo que quieras. # _______________________________________________________________________________ # Así se ven las listas Veamos su anatomía: - MyList es el nombre - [] Todo lo que va dentro de los corchetes son los elementos de la lista, los cuales deben ir separador por comas. # _______________________________________________________________________________ ¿Cómo podemos ver, editar y eliminar elementos dentro de una lista? Antes de todo, debemos entender el concepto de índice. Un índice es la manera en la que identificamos un elemento dentro de una lista. Los índices son números, que empiezan desde cero. 0 1 2 3 MyList = [123, 'string', 3.1415, 'a'] Y así sucesivamente con los demás elementos. Cuando decimos el elemento #2, hacemos referencia a 3.1415. # _______________________________________________________________________________ # VER ELEMENTOS DE UNA LISTA # Con los índices podemos acceder a los elementos de una lista # CAMBIAR ELEMENTOS DE UNA LISTA # Cambiar elementos por otros elementos de la misma lista # Cambiar elementos por otro dato que queramos # ELIMINAR ELEMENTOS DE UNA LISTA # la instrucción 'del' es para eliminar un elemento de una lista con base en un índice NOTA: - Cuando se dice 'espacio en memoria' se hace referencia al lugar donde en verdad se gurda algo. Una variable es una representación de ese espacio en memoria. 
- Si a una variable se le asigna el nombre de una lista, no hace una copia de una lista, pero hace que las variable apunte a la misma lista en memoria. Es decir, si se le asigna a una variable una lista, tanto la lista como la variable apuntarán al mismo espacio en memoria. - El nombre de la lista es una representación de un espacio en memoria. Es por eso mismo, que cuando haces variable = MyList y cambias el valor de 'variable', los cambios también se verán en 'MyList', porque ambas son representaciones diferentes del mismo espacio en memoria. # _______________________________________________________________________________ # FUNCIONES VS MÉTODOS Ya hemos visto qué es una función y hemos trabajado con ellas. Pero ahora se introducirá un concepto que es muy importante, y es el de los métodos. Una función es una estructura de código que produce o modifica algo. Sin embargo, una función puede ser aplicable a todo el código es decir, puedes aplicar print() a números, cadenas y listas. ¿Existe una función que solo sea aplicable a datos tipo int, por ejemplo? Los métodos son como las funciones, pero solo se pueden aplicar a ciertos tipo de datos. Es decir, hay métodos para listas, métodos para cadenas, métodos para enteros, etc. No se puede aplicar un método de lista en un tipo de dato numérico. Los métodos son importantes porque nos permiten tener mayor control en el flujo de los tipos de datos y el cómo se comportan estos en nuestro programa. Un método tiene la siguiente estructura: data.method(argumento) data: Es la variable a manipular, debe ser un tipo de datos específico. 
method: El nombre del método # Veamos un ejemplo de método para un tipo de dato string # NOTA: Las cadenas también tienen índices # Salida: 'l' # El método count cuenta las veces que un argumento está en una variable # salida: 3 # Para más información acerca de métodos para strings: # https://controlautomaticoeducacion.com/python-desde-cero/metodos-string-en-python/ Espero que te hayan quedado claro los temas vistos en este capítulo: - Diferencia entre una variable escalar y una lista - Cómo crear y manipular básicamente una lista - Diferencia entre función y método ¡En el siguiente capítulo aprenderás más cosas acerca de las listas! ;) | 4.175593 | 4 |
nano/utils.py | sanjeevan/nanoinvoice | 1 | 6620014 | <gh_stars>1-10
"""
Utils has nothing to do with models and views.
"""
from datetime import datetime
import json
import decimal
import random
VARCHAR_LEN_128 = 128
ALPHABET = "bcdfghjklmnpqrstvwxyz0123456789BCDFGHJKLMNPQRSTVWXYZ"
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
def json_dumps(obj):
class DateTimeJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
if isinstance(obj, decimal.Decimal):
return float(obj)
else:
return super(DateTimeJSONEncoder, self).default(obj)
return json.dumps(obj, cls=DateTimeJSONEncoder)
def model_to_dict(model):
d = {}
for c in model.__table__.columns:
val = getattr(model, c.name)
d[c.name] = val
return d
def get_current_time():
return datetime.utcnow()
def pretty_date(dt, default=None):
"""
Returns string representing "time since" e.g.
3 days ago, 5 hours ago etc.
Ref: https://bitbucket.org/danjac/newsmeme/src/a281babb9ca3/newsmeme/
"""
if default is None:
default = 'just now'
now = datetime.utcnow()
diff = now - dt
periods = (
(diff.days / 365, 'year', 'years'),
(diff.days / 30, 'month', 'months'),
(diff.days / 7, 'week', 'weeks'),
(diff.days, 'day', 'days'),
(diff.seconds / 3600, 'hour', 'hours'),
(diff.seconds / 60, 'minute', 'minutes'),
(diff.seconds, 'second', 'seconds'),
)
for period, singular, plural in periods:
if not period:
continue
if period == 1:
return u'%d %s ago' % (period, singular)
else:
return u'%d %s ago' % (period, plural)
return default
def random_id(length):
return ''.join([random.choice(ALPHABET) for i in xrange(length)])
| """
Utils has nothing to do with models and views.
"""
from datetime import datetime
import json
import decimal
import random
VARCHAR_LEN_128 = 128
ALPHABET = "bcdfghjklmnpqrstvwxyz0123456789BCDFGHJKLMNPQRSTVWXYZ"
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
def json_dumps(obj):
class DateTimeJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
if isinstance(obj, decimal.Decimal):
return float(obj)
else:
return super(DateTimeJSONEncoder, self).default(obj)
return json.dumps(obj, cls=DateTimeJSONEncoder)
def model_to_dict(model):
d = {}
for c in model.__table__.columns:
val = getattr(model, c.name)
d[c.name] = val
return d
def get_current_time():
return datetime.utcnow()
def pretty_date(dt, default=None):
"""
Returns string representing "time since" e.g.
3 days ago, 5 hours ago etc.
Ref: https://bitbucket.org/danjac/newsmeme/src/a281babb9ca3/newsmeme/
"""
if default is None:
default = 'just now'
now = datetime.utcnow()
diff = now - dt
periods = (
(diff.days / 365, 'year', 'years'),
(diff.days / 30, 'month', 'months'),
(diff.days / 7, 'week', 'weeks'),
(diff.days, 'day', 'days'),
(diff.seconds / 3600, 'hour', 'hours'),
(diff.seconds / 60, 'minute', 'minutes'),
(diff.seconds, 'second', 'seconds'),
)
for period, singular, plural in periods:
if not period:
continue
if period == 1:
return u'%d %s ago' % (period, singular)
else:
return u'%d %s ago' % (period, plural)
return default
def random_id(length):
return ''.join([random.choice(ALPHABET) for i in xrange(length)]) | en | 0.809418 | Utils has nothing to do with models and views. Returns string representing "time since" e.g. 3 days ago, 5 hours ago etc. Ref: https://bitbucket.org/danjac/newsmeme/src/a281babb9ca3/newsmeme/ | 2.442713 | 2 |
PART05/19_datetime_format.py | arti1117/python-machine-learning-pandas-data-analytics | 1 | 6620015 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 8 18:31:16 2020
@author: arti
"""
import pandas as pd
df = pd.read_csv('stock-data.csv')
df['new_Date'] = pd.to_datetime(df.Date)
print(df.head())
print('--')
df['Year'] = df.new_Date.dt.year
df['Month'] = df.new_Date.dt.month
df['Day'] = df.new_Date.dt.day
print(df.head())
print('--')
df['Date_yr'] = df.new_Date.dt.to_period(freq='A')
df['Date_m'] = df.new_Date.dt.to_period(freq='M')
print(df.head())
print('--')
df.set_index('Date_m', inplace=True)
print(df.head())
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 8 18:31:16 2020
@author: arti
"""
import pandas as pd
df = pd.read_csv('stock-data.csv')
df['new_Date'] = pd.to_datetime(df.Date)
print(df.head())
print('--')
df['Year'] = df.new_Date.dt.year
df['Month'] = df.new_Date.dt.month
df['Day'] = df.new_Date.dt.day
print(df.head())
print('--')
df['Date_yr'] = df.new_Date.dt.to_period(freq='A')
df['Date_m'] = df.new_Date.dt.to_period(freq='M')
print(df.head())
print('--')
df.set_index('Date_m', inplace=True)
print(df.head())
| en | 0.517623 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Sat Aug 8 18:31:16 2020 @author: arti | 3.497607 | 3 |
app.py | lucasvalentim/deep-learning-exemples | 0 | 6620016 | import io
import numpy as np
import flask
from decouple import config
from PIL import Image
from exemples.mnist import mnist
DEBUG = config('DEBUG', default=False)
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('mnist.html')
@app.route('/cat-vs-dog')
def cat_vs_dog():
return flask.render_template('cat-vs-dog.html')
@app.route('/pneumonia-detector')
def pneumonia_detector():
return flask.render_template('pneumonia-detector.html')
@app.route('/sentiment-analysis')
def sentiment_analysis():
return flask.render_template('sentiment-analysis.html')
@app.route('/api/mnist/predict', methods=['POST'])
def mnist_predict():
data = {'success': False}
if flask.request.method == 'POST':
if flask.request.files.get('image'):
image = flask.request.files['image'].read()
image = Image.open(io.BytesIO(image))
image = mnist.prepare_image(image)
model = mnist.load_production_model()
preds = model.predict(image)
data['prediction'] = mnist.decode_predictions(preds[0])
data['success'] = True
return flask.jsonify(data)
if __name__ == '__main__':
app.run(debug=DEBUG)
| import io
import numpy as np
import flask
from decouple import config
from PIL import Image
from exemples.mnist import mnist
DEBUG = config('DEBUG', default=False)
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('mnist.html')
@app.route('/cat-vs-dog')
def cat_vs_dog():
return flask.render_template('cat-vs-dog.html')
@app.route('/pneumonia-detector')
def pneumonia_detector():
return flask.render_template('pneumonia-detector.html')
@app.route('/sentiment-analysis')
def sentiment_analysis():
return flask.render_template('sentiment-analysis.html')
@app.route('/api/mnist/predict', methods=['POST'])
def mnist_predict():
data = {'success': False}
if flask.request.method == 'POST':
if flask.request.files.get('image'):
image = flask.request.files['image'].read()
image = Image.open(io.BytesIO(image))
image = mnist.prepare_image(image)
model = mnist.load_production_model()
preds = model.predict(image)
data['prediction'] = mnist.decode_predictions(preds[0])
data['success'] = True
return flask.jsonify(data)
if __name__ == '__main__':
app.run(debug=DEBUG)
| none | 1 | 2.501404 | 3 | |
cogs/jokes.py | Thfona/CaPy | 0 | 6620017 | <filename>cogs/jokes.py
import random
import discord
from discord.ext import commands
class Jokes(commands.Cog):
def __init__(self, client):
self.client = client
# Commands
@commands.command()
async def sorte(self, context, *args):
question = ' '.join(args)
if (not question):
await context.send('Você precisa me perguntar alguma coisa.')
return
responses = ['Com certeza.', 'Decididamente sim.', 'Sem dúvida.', 'Definitivamente.', 'Pode contar com isso.', 'Na minha visão, sim.', 'Provavelmente.', 'Parece que sim.', 'Sim.', 'Tudo indica que sim.', 'A resposta não tá muito clara, tenta de novo depois.',
'Me pergunta de novo depois.', 'Melhor eu não te falar agora.', 'Não consigo te responder isso agora.', 'Se concentra e pergunta de novo.', 'Não dá pra contar com isso.', 'Minha resposta é não.', 'Minhas fontes dizem que não.', 'Parece que não.', 'Duvido.']
await context.send(f'{random.choice(responses)}')
def setup(client):
client.add_cog(Jokes(client))
| <filename>cogs/jokes.py
import random
import discord
from discord.ext import commands
class Jokes(commands.Cog):
def __init__(self, client):
self.client = client
# Commands
@commands.command()
async def sorte(self, context, *args):
question = ' '.join(args)
if (not question):
await context.send('Você precisa me perguntar alguma coisa.')
return
responses = ['Com certeza.', 'Decididamente sim.', 'Sem dúvida.', 'Definitivamente.', 'Pode contar com isso.', 'Na minha visão, sim.', 'Provavelmente.', 'Parece que sim.', 'Sim.', 'Tudo indica que sim.', 'A resposta não tá muito clara, tenta de novo depois.',
'Me pergunta de novo depois.', 'Melhor eu não te falar agora.', 'Não consigo te responder isso agora.', 'Se concentra e pergunta de novo.', 'Não dá pra contar com isso.', 'Minha resposta é não.', 'Minhas fontes dizem que não.', 'Parece que não.', 'Duvido.']
await context.send(f'{random.choice(responses)}')
def setup(client):
client.add_cog(Jokes(client))
| en | 0.728294 | # Commands | 2.706351 | 3 |
test/test.py | anenriquez/STNU | 0 | 6620018 | import json
from stn.stp import STP
STNU = "data/stnu_two_tasks.json"
if __name__ == '__main__':
with open(STNU) as json_file:
stnu_dict = json.load(json_file)
# Convert the dict to a json string
stnu_json = json.dumps(stnu_dict)
stp = STP('dsc_lp')
stn = stp.get_stn(stn_json=stnu_json)
print(stn)
stn_dict = stn.to_dict()
print(stn_dict)
print(type(stn_dict['nodes'][0]['data']))
| import json
from stn.stp import STP
STNU = "data/stnu_two_tasks.json"
if __name__ == '__main__':
with open(STNU) as json_file:
stnu_dict = json.load(json_file)
# Convert the dict to a json string
stnu_json = json.dumps(stnu_dict)
stp = STP('dsc_lp')
stn = stp.get_stn(stn_json=stnu_json)
print(stn)
stn_dict = stn.to_dict()
print(stn_dict)
print(type(stn_dict['nodes'][0]['data']))
| en | 0.345333 | # Convert the dict to a json string | 2.842418 | 3 |
invest_natcap/fisheries/fisheries_hst.py | phargogh/invest-natcap.invest-3 | 0 | 6620019 | <filename>invest_natcap/fisheries/fisheries_hst.py
'''
The Fisheries Habitat Scenario Tool module contains the high-level code for
generating a new Population Parameters CSV File based on habitat area
change and the dependencies that particular classes of the given species
have on particular habitats.
'''
import logging
import pprint
import numpy as np
try:
from invest_natcap.fisheries import fisheries_hst_io as io
except:
import fisheries_hst_io as io
pp = pprint.PrettyPrinter(indent=4)
LOGGER = logging.getLogger('invest_natcap.fisheries.hst')
logging.basicConfig(format='%(asctime)s %(name)-15s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
def execute(args):
'''
Entry point into the Fisheries Habitat Scenario Tool
The Fisheries Habitat Scenario Tool generates a new Population Parameters
CSV File with modified survival attributes across classes and regions
based on habitat area changes and class-level dependencies on those
habitats.
:param str args['workspace_dir']: location into which the resultant
modified Population Parameters CSV file should be placed.
:param str args['sexsp']: specifies whether or not the age and stage
classes are distinguished by sex. Options: 'Yes' or 'No'
:param str args['population_csv_uri']: location of the population
parameters csv file. This file contains all age and stage specific
parameters.
:param str args['habitat_chg_csv_uri']: location of the habitat change
parameters csv file. This file contains habitat area change
information.
:param str args['habitat_dep_csv_uri']: location of the habitat dependency
parameters csv file. This file contains habitat-class dependency
information.
:param float args['gamma']: describes the relationship between a change
in habitat area and a change in survival of life stages dependent on
that habitat
Returns:
None
Example Args::
args = {
'workspace_dir': 'path/to/workspace_dir/',
'sexsp': 'Yes',
'population_csv_uri': 'path/to/csv',
'habitat_chg_csv_uri': 'path/to/csv',
'habitat_dep_csv_uri': 'path/to/csv',
'gamma': 0.5,
}
Note:
+ Modified Population Parameters CSV File saved to 'workspace_dir/output/'
'''
# Parse, Verify Inputs
vars_dict = io.fetch_args(args)
# Convert Data
vars_dict = convert_survival_matrix(vars_dict)
# Generate Modified Population Parameters CSV File
io.save_population_csv(vars_dict)
def convert_survival_matrix(vars_dict):
'''
Creates a new survival matrix based on the information provided by
the user related to habitat area changes and class-level dependencies
on those habitats.
Args:
vars_dict (dictionary): see fisheries_preprocessor_io.fetch_args for
example
Returns:
vars_dict (dictionary): modified vars_dict with new Survival matrix
accessible using the key 'Surv_nat_xsa_mod' with element values
that exist between [0,1]
Example Returns::
ret = {
# Other Variables...
'Surv_nat_xsa_mod': np.ndarray([...])
}
'''
# Fetch original survival matrix
S_sxa = vars_dict['Surv_nat_xsa'].swapaxes(0, 1)
# Fetch conversion parameters
gamma = vars_dict['gamma']
H_chg_hx = vars_dict['Hab_chg_hx'] # H_hx
D_ha = vars_dict['Hab_dep_ha'] # D_ah
t_a = vars_dict['Hab_class_mvmt_a'] # T_a
n_a = vars_dict['Hab_dep_num_a'] # n_h
n_a[n_a == 0] = 0
num_habitats = len(vars_dict['Habitats'])
num_classes = len(vars_dict['Classes'])
num_regions = len(vars_dict['Regions'])
# Apply function
Mod_elements_xha = np.ones([num_regions, num_habitats, num_classes])
A = Mod_elements_xha * D_ha
A[A != 0] = 1
Mod_elements_xha = A
# Create element-wise exponents
Exp_xha = Mod_elements_xha * D_ha * gamma
# Swap Axes in Arrays showing modified elements
Mod_elements_ahx = Mod_elements_xha.swapaxes(0, 2)
# Absolute percent change in habitat size across all elements
H_chg_all_ahx = (Mod_elements_ahx * H_chg_hx)
nonzero_elements = (H_chg_all_ahx != 0)
H_chg_all_ahx[nonzero_elements] += 1
# Swap Axes
H_chg_all_xha = H_chg_all_ahx.swapaxes(0, 2)
# Apply sensitivity exponent to habitat area change matrix
H_xha = (H_chg_all_xha ** Exp_xha)
ones_elements = (H_xha == 1)
H_xha[ones_elements] = 0
# Sum across habitats
H_xa = H_xha.sum(axis=1)
# Divide by number of habitats and cancel non-class-transition elements
H_xa_weighted = (H_xa * t_a) / n_a
# Add unchanged elements back in to matrix
nan_elements = np.isnan(H_xa_weighted)
H_xa_weighted[nan_elements] = 1
zero_elements = (H_xa_weighted == 0)
H_xa_weighted[zero_elements] = 1
H_coefficient_xa = H_xa_weighted
# Multiply coefficients by original Survival matrix
nan_idx = np.isnan(H_coefficient_xa)
H_coefficient_xa[nan_idx] = 1
S_mod_sxa = S_sxa * H_coefficient_xa
# Filter and correct for elements outside [0, 1]
S_mod_sxa[S_mod_sxa > 1.0] = 1
S_mod_sxa[S_mod_sxa < 0.0] = 0
# Return
vars_dict['Surv_nat_xsa_mod'] = S_mod_sxa.swapaxes(0, 1)
return vars_dict
| <filename>invest_natcap/fisheries/fisheries_hst.py
'''
The Fisheries Habitat Scenario Tool module contains the high-level code for
generating a new Population Parameters CSV File based on habitat area
change and the dependencies that particular classes of the given species
have on particular habitats.
'''
import logging
import pprint
import numpy as np
try:
from invest_natcap.fisheries import fisheries_hst_io as io
except:
import fisheries_hst_io as io
pp = pprint.PrettyPrinter(indent=4)
LOGGER = logging.getLogger('invest_natcap.fisheries.hst')
logging.basicConfig(format='%(asctime)s %(name)-15s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
def execute(args):
'''
Entry point into the Fisheries Habitat Scenario Tool
The Fisheries Habitat Scenario Tool generates a new Population Parameters
CSV File with modified survival attributes across classes and regions
based on habitat area changes and class-level dependencies on those
habitats.
:param str args['workspace_dir']: location into which the resultant
modified Population Parameters CSV file should be placed.
:param str args['sexsp']: specifies whether or not the age and stage
classes are distinguished by sex. Options: 'Yes' or 'No'
:param str args['population_csv_uri']: location of the population
parameters csv file. This file contains all age and stage specific
parameters.
:param str args['habitat_chg_csv_uri']: location of the habitat change
parameters csv file. This file contains habitat area change
information.
:param str args['habitat_dep_csv_uri']: location of the habitat dependency
parameters csv file. This file contains habitat-class dependency
information.
:param float args['gamma']: describes the relationship between a change
in habitat area and a change in survival of life stages dependent on
that habitat
Returns:
None
Example Args::
args = {
'workspace_dir': 'path/to/workspace_dir/',
'sexsp': 'Yes',
'population_csv_uri': 'path/to/csv',
'habitat_chg_csv_uri': 'path/to/csv',
'habitat_dep_csv_uri': 'path/to/csv',
'gamma': 0.5,
}
Note:
+ Modified Population Parameters CSV File saved to 'workspace_dir/output/'
'''
# Parse, Verify Inputs
vars_dict = io.fetch_args(args)
# Convert Data
vars_dict = convert_survival_matrix(vars_dict)
# Generate Modified Population Parameters CSV File
io.save_population_csv(vars_dict)
def convert_survival_matrix(vars_dict):
'''
Creates a new survival matrix based on the information provided by
the user related to habitat area changes and class-level dependencies
on those habitats.
Args:
vars_dict (dictionary): see fisheries_preprocessor_io.fetch_args for
example
Returns:
vars_dict (dictionary): modified vars_dict with new Survival matrix
accessible using the key 'Surv_nat_xsa_mod' with element values
that exist between [0,1]
Example Returns::
ret = {
# Other Variables...
'Surv_nat_xsa_mod': np.ndarray([...])
}
'''
# Fetch original survival matrix
S_sxa = vars_dict['Surv_nat_xsa'].swapaxes(0, 1)
# Fetch conversion parameters
gamma = vars_dict['gamma']
H_chg_hx = vars_dict['Hab_chg_hx'] # H_hx
D_ha = vars_dict['Hab_dep_ha'] # D_ah
t_a = vars_dict['Hab_class_mvmt_a'] # T_a
n_a = vars_dict['Hab_dep_num_a'] # n_h
n_a[n_a == 0] = 0
num_habitats = len(vars_dict['Habitats'])
num_classes = len(vars_dict['Classes'])
num_regions = len(vars_dict['Regions'])
# Apply function
Mod_elements_xha = np.ones([num_regions, num_habitats, num_classes])
A = Mod_elements_xha * D_ha
A[A != 0] = 1
Mod_elements_xha = A
# Create element-wise exponents
Exp_xha = Mod_elements_xha * D_ha * gamma
# Swap Axes in Arrays showing modified elements
Mod_elements_ahx = Mod_elements_xha.swapaxes(0, 2)
# Absolute percent change in habitat size across all elements
H_chg_all_ahx = (Mod_elements_ahx * H_chg_hx)
nonzero_elements = (H_chg_all_ahx != 0)
H_chg_all_ahx[nonzero_elements] += 1
# Swap Axes
H_chg_all_xha = H_chg_all_ahx.swapaxes(0, 2)
# Apply sensitivity exponent to habitat area change matrix
H_xha = (H_chg_all_xha ** Exp_xha)
ones_elements = (H_xha == 1)
H_xha[ones_elements] = 0
# Sum across habitats
H_xa = H_xha.sum(axis=1)
# Divide by number of habitats and cancel non-class-transition elements
H_xa_weighted = (H_xa * t_a) / n_a
# Add unchanged elements back in to matrix
nan_elements = np.isnan(H_xa_weighted)
H_xa_weighted[nan_elements] = 1
zero_elements = (H_xa_weighted == 0)
H_xa_weighted[zero_elements] = 1
H_coefficient_xa = H_xa_weighted
# Multiply coefficients by original Survival matrix
nan_idx = np.isnan(H_coefficient_xa)
H_coefficient_xa[nan_idx] = 1
S_mod_sxa = S_sxa * H_coefficient_xa
# Filter and correct for elements outside [0, 1]
S_mod_sxa[S_mod_sxa > 1.0] = 1
S_mod_sxa[S_mod_sxa < 0.0] = 0
# Return
vars_dict['Surv_nat_xsa_mod'] = S_mod_sxa.swapaxes(0, 1)
return vars_dict
| en | 0.601046 | The Fisheries Habitat Scenario Tool module contains the high-level code for generating a new Population Parameters CSV File based on habitat area change and the dependencies that particular classes of the given species have on particular habitats. Entry point into the Fisheries Habitat Scenario Tool The Fisheries Habitat Scenario Tool generates a new Population Parameters CSV File with modified survival attributes across classes and regions based on habitat area changes and class-level dependencies on those habitats. :param str args['workspace_dir']: location into which the resultant modified Population Parameters CSV file should be placed. :param str args['sexsp']: specifies whether or not the age and stage classes are distinguished by sex. Options: 'Yes' or 'No' :param str args['population_csv_uri']: location of the population parameters csv file. This file contains all age and stage specific parameters. :param str args['habitat_chg_csv_uri']: location of the habitat change parameters csv file. This file contains habitat area change information. :param str args['habitat_dep_csv_uri']: location of the habitat dependency parameters csv file. This file contains habitat-class dependency information. :param float args['gamma']: describes the relationship between a change in habitat area and a change in survival of life stages dependent on that habitat Returns: None Example Args:: args = { 'workspace_dir': 'path/to/workspace_dir/', 'sexsp': 'Yes', 'population_csv_uri': 'path/to/csv', 'habitat_chg_csv_uri': 'path/to/csv', 'habitat_dep_csv_uri': 'path/to/csv', 'gamma': 0.5, } Note: + Modified Population Parameters CSV File saved to 'workspace_dir/output/' # Parse, Verify Inputs # Convert Data # Generate Modified Population Parameters CSV File Creates a new survival matrix based on the information provided by the user related to habitat area changes and class-level dependencies on those habitats. 
Args: vars_dict (dictionary): see fisheries_preprocessor_io.fetch_args for example Returns: vars_dict (dictionary): modified vars_dict with new Survival matrix accessible using the key 'Surv_nat_xsa_mod' with element values that exist between [0,1] Example Returns:: ret = { # Other Variables... 'Surv_nat_xsa_mod': np.ndarray([...]) } # Fetch original survival matrix # Fetch conversion parameters # H_hx # D_ah # T_a # n_h # Apply function # Create element-wise exponents # Swap Axes in Arrays showing modified elements # Absolute percent change in habitat size across all elements # Swap Axes # Apply sensitivity exponent to habitat area change matrix # Sum across habitats # Divide by number of habitats and cancel non-class-transition elements # Add unchanged elements back in to matrix # Multiply coefficients by original Survival matrix # Filter and correct for elements outside [0, 1] # Return | 2.784273 | 3 |
asym_rlpo/algorithms/dqn/adqn_state.py | abaisero/asym-porl | 2 | 6620020 | <gh_stars>1-10
from __future__ import annotations
from typing import Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from asym_rlpo.data import Episode, Torch_O, Torch_S
from .base import PO_EpisodicDQN_ABC
class ADQN_State(PO_EpisodicDQN_ABC):
model_keys = {
'agent': [
'action_model',
'observation_model',
'history_model',
'qh_model',
'state_model',
'qs_model',
]
}
def compute_q_values(
self,
models: nn.ModuleDict,
actions: torch.Tensor,
observations: Torch_O,
states: Torch_S,
) -> Tuple[torch.Tensor, torch.Tensor]:
history_features = self.compute_history_features(
models.agent.action_model,
models.agent.observation_model,
models.agent.history_model,
actions,
observations,
)
qh_values = models.agent.qh_model(history_features)
state_features = models.agent.state_model(states)
qs_values = models.agent.qs_model(state_features)
return qh_values, qs_values
def qs_loss(
self,
episode: Episode,
qh_values: torch.Tensor,
qs_values: torch.Tensor,
target_qh_values: torch.Tensor,
target_qs_values: torch.Tensor,
*,
discount: float,
) -> torch.Tensor:
qs_values = qs_values.gather(1, episode.actions.unsqueeze(-1)).squeeze(
-1
)
qs_values_bootstrap = (
target_qs_values.gather(
1, target_qh_values.argmax(-1).unsqueeze(-1)
)
.squeeze(-1)
.roll(-1, 0)
)
qs_values_bootstrap[-1] = 0.0
loss = F.mse_loss(
qs_values,
episode.rewards + discount * qs_values_bootstrap,
)
return loss
def qh_loss(
self,
episode: Episode,
qh_values: torch.Tensor,
qs_values: torch.Tensor,
target_qh_values: torch.Tensor,
target_qs_values: torch.Tensor,
*,
discount: float,
) -> torch.Tensor:
loss = F.mse_loss(
qh_values,
target_qs_values,
)
return loss
def episodic_loss(
self, episodes: Sequence[Episode], *, discount: float
) -> torch.Tensor:
losses = []
for episode in episodes:
qh_values, qs_values = self.compute_q_values(
self.models,
episode.actions,
episode.observations,
episode.states,
)
with torch.no_grad():
target_qh_values, target_qs_values = self.compute_q_values(
self.target_models,
episode.actions,
episode.observations,
episode.states,
)
qs_loss = self.qs_loss(
episode,
qh_values,
qs_values,
target_qh_values,
target_qs_values,
discount=discount,
)
qh_loss = self.qh_loss(
episode,
qh_values,
qs_values,
target_qh_values,
target_qs_values,
discount=discount,
)
loss = (qs_loss + qh_loss) / 2
losses.append(loss)
return sum(losses) / len(losses) # type: ignore
class ADQN_State_Bootstrap(ADQN_State):
def qh_loss(
self,
episode: Episode,
qh_values: torch.Tensor,
qs_values: torch.Tensor,
target_qh_values: torch.Tensor,
target_qs_values: torch.Tensor,
*,
discount: float,
) -> torch.Tensor:
qh_values = qh_values.gather(1, episode.actions.unsqueeze(-1)).squeeze(
-1
)
qs_values_bootstrap = (
target_qs_values.gather(
1, target_qh_values.argmax(-1).unsqueeze(-1)
)
.squeeze(-1)
.roll(-1, 0)
)
qs_values_bootstrap[-1] = 0.0
loss = F.mse_loss(
qh_values,
episode.rewards + discount * qs_values_bootstrap,
)
return loss
| from __future__ import annotations
from typing import Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from asym_rlpo.data import Episode, Torch_O, Torch_S
from .base import PO_EpisodicDQN_ABC
class ADQN_State(PO_EpisodicDQN_ABC):
model_keys = {
'agent': [
'action_model',
'observation_model',
'history_model',
'qh_model',
'state_model',
'qs_model',
]
}
def compute_q_values(
self,
models: nn.ModuleDict,
actions: torch.Tensor,
observations: Torch_O,
states: Torch_S,
) -> Tuple[torch.Tensor, torch.Tensor]:
history_features = self.compute_history_features(
models.agent.action_model,
models.agent.observation_model,
models.agent.history_model,
actions,
observations,
)
qh_values = models.agent.qh_model(history_features)
state_features = models.agent.state_model(states)
qs_values = models.agent.qs_model(state_features)
return qh_values, qs_values
def qs_loss(
self,
episode: Episode,
qh_values: torch.Tensor,
qs_values: torch.Tensor,
target_qh_values: torch.Tensor,
target_qs_values: torch.Tensor,
*,
discount: float,
) -> torch.Tensor:
qs_values = qs_values.gather(1, episode.actions.unsqueeze(-1)).squeeze(
-1
)
qs_values_bootstrap = (
target_qs_values.gather(
1, target_qh_values.argmax(-1).unsqueeze(-1)
)
.squeeze(-1)
.roll(-1, 0)
)
qs_values_bootstrap[-1] = 0.0
loss = F.mse_loss(
qs_values,
episode.rewards + discount * qs_values_bootstrap,
)
return loss
def qh_loss(
self,
episode: Episode,
qh_values: torch.Tensor,
qs_values: torch.Tensor,
target_qh_values: torch.Tensor,
target_qs_values: torch.Tensor,
*,
discount: float,
) -> torch.Tensor:
loss = F.mse_loss(
qh_values,
target_qs_values,
)
return loss
def episodic_loss(
self, episodes: Sequence[Episode], *, discount: float
) -> torch.Tensor:
losses = []
for episode in episodes:
qh_values, qs_values = self.compute_q_values(
self.models,
episode.actions,
episode.observations,
episode.states,
)
with torch.no_grad():
target_qh_values, target_qs_values = self.compute_q_values(
self.target_models,
episode.actions,
episode.observations,
episode.states,
)
qs_loss = self.qs_loss(
episode,
qh_values,
qs_values,
target_qh_values,
target_qs_values,
discount=discount,
)
qh_loss = self.qh_loss(
episode,
qh_values,
qs_values,
target_qh_values,
target_qs_values,
discount=discount,
)
loss = (qs_loss + qh_loss) / 2
losses.append(loss)
return sum(losses) / len(losses) # type: ignore
class ADQN_State_Bootstrap(ADQN_State):
def qh_loss(
self,
episode: Episode,
qh_values: torch.Tensor,
qs_values: torch.Tensor,
target_qh_values: torch.Tensor,
target_qs_values: torch.Tensor,
*,
discount: float,
) -> torch.Tensor:
qh_values = qh_values.gather(1, episode.actions.unsqueeze(-1)).squeeze(
-1
)
qs_values_bootstrap = (
target_qs_values.gather(
1, target_qh_values.argmax(-1).unsqueeze(-1)
)
.squeeze(-1)
.roll(-1, 0)
)
qs_values_bootstrap[-1] = 0.0
loss = F.mse_loss(
qh_values,
episode.rewards + discount * qs_values_bootstrap,
)
return loss | it | 0.190853 | # type: ignore | 2.157664 | 2 |
pixyz/distributions/distributions.py | MokkeMeguru/pixyz-test | 0 | 6620021 | <reponame>MokkeMeguru/pixyz-test
from __future__ import print_function
import torch
import re
from torch import nn
from copy import deepcopy
from ..utils import get_dict_values, replace_dict_keys, replace_dict_keys_split, delete_dict_values,\
tolist, sum_samples, convert_latex_name
from ..losses import LogProb, Prob
class Distribution(nn.Module):
    """Distribution class. In Pixyz, all distributions are required to inherit this class.

    Examples
    --------
    >>> import torch
    >>> from torch.nn import functional as F
    >>> from pixyz.distributions import Normal

    >>> # Marginal distribution
    >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
    ...             features_shape=[64], name="p1")
    >>> print(p1)
    Distribution:
      p_{1}(x)
    Network architecture:
      Normal(
        name=p_{1}, distribution_name=Normal,
        var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([64])
        (loc): torch.Size([1, 64])
        (scale): torch.Size([1, 64])
      )

    >>> # Conditional distribution
    >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
    ...             features_shape=[64], name="p2")
    >>> print(p2)
    Distribution:
      p_{2}(x|y)
    Network architecture:
      Normal(
        name=p_{2}, distribution_name=Normal,
        var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([64])
        (scale): torch.Size([1, 64])
      )

    >>> # Conditional distribution (by neural networks)
    >>> class P(Normal):
    ...     def __init__(self):
    ...         super().__init__(var=["x"], cond_var=["y"], name="p3")
    ...         self.model_loc = nn.Linear(128, 64)
    ...         self.model_scale = nn.Linear(128, 64)
    ...     def forward(self, y):
    ...         return {"loc": self.model_loc(y), "scale": F.softplus(self.model_scale(y))}
    >>> p3 = P()
    >>> print(p3)
    Distribution:
      p_{3}(x|y)
    Network architecture:
      P(
        name=p_{3}, distribution_name=Normal,
        var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([])
        (model_loc): Linear(in_features=128, out_features=64, bias=True)
        (model_scale): Linear(in_features=128, out_features=64, bias=True)
      )
    """

    def __init__(self, var, cond_var=[], name="p", features_shape=torch.Size()):
        """
        Parameters
        ----------
        var : :obj:`list` of :obj:`str`
            Variables of this distribution.
        cond_var : :obj:`list` of :obj:`str`, defaults to []
            Conditional variables of this distribution.
            In case that cond_var is not empty, we must set the corresponding inputs to sample variables.
        name : :obj:`str`, defaults to "p"
            Name of this distribution.
            This name is displayed in :attr:`prob_text` and :attr:`prob_factorized_text`.
        features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size())
            Shape of dimensions (features) of this distribution.
        """
        super().__init__()

        # A variable must not appear both as a variable and as a conditional variable.
        _vars = cond_var + var
        if len(_vars) != len(set(_vars)):
            raise ValueError("There are conflicted variables.")

        self._cond_var = cond_var
        self._var = var
        self._features_shape = torch.Size(features_shape)
        self._name = convert_latex_name(name)

        self._prob_text = None
        self._prob_factorized_text = None

    @property
    def distribution_name(self):
        """str: Name of this distribution class."""
        return ""

    @property
    def name(self):
        """str: Name of this distribution displayed in :obj:`prob_text` and :obj:`prob_factorized_text`."""
        return self._name

    @name.setter
    def name(self, name):
        if not isinstance(name, str):
            raise ValueError("Name of the distribution class must be a string type.")
        # Apply the same LaTeX-name conversion as __init__ so that renaming a
        # distribution after construction renders consistently in prob_text.
        self._name = convert_latex_name(name)

    @property
    def var(self):
        """list: Variables of this distribution."""
        return self._var

    @property
    def cond_var(self):
        """list: Conditional variables of this distribution."""
        return self._cond_var

    @property
    def input_var(self):
        """list: Input variables of this distribution.

        Normally, it has same values as :attr:`cond_var`.
        """
        return self._cond_var

    @property
    def prob_text(self):
        """str: Return a formula of the (joint) probability distribution."""
        _var_text = [','.join([convert_latex_name(var_name) for var_name in self._var])]
        if len(self._cond_var) != 0:
            _var_text += [','.join([convert_latex_name(var_name) for var_name in self._cond_var])]

        _prob_text = "{}({})".format(
            self._name,
            "|".join(_var_text)
        )

        return _prob_text

    @property
    def prob_factorized_text(self):
        """str: Return a formula of the factorized probability distribution."""
        return self.prob_text

    @property
    def prob_joint_factorized_and_text(self):
        """str: Return a formula of the factorized and the (joint) probability distributions."""
        if self.prob_factorized_text == self.prob_text:
            prob_text = self.prob_text
        else:
            prob_text = "{} = {}".format(self.prob_text, self.prob_factorized_text)

        return prob_text

    @property
    def features_shape(self):
        """torch.Size or list: Shape of features of this distribution."""
        return self._features_shape

    def _check_input(self, input, var=None):
        """Check the type of given input.

        If the input type is :obj:`dict`, this method checks whether the input keys contains the :attr:`var` list.
        In case that its type is :obj:`list` or :obj:`tensor`, it returns the output formatted in :obj:`dict`.

        Parameters
        ----------
        input : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`
            Input variables.
        var : :obj:`list` or :obj:`NoneType`, defaults to None
            Variables to check if given input contains them.
            This is set to None by default.

        Returns
        -------
        input_dict : dict
            Variables checked in this method.

        Raises
        ------
        ValueError
            Raises `ValueError` if the type of input is neither :obj:`torch.Tensor`, :obj:`list`, nor :obj:`dict.
        """
        if var is None:
            var = self.input_var

        # Use isinstance rather than exact type checks so that valid subclasses
        # (e.g. nn.Parameter for Tensor, OrderedDict for dict) are accepted.
        if isinstance(input, torch.Tensor):
            input_dict = {var[0]: input}

        elif isinstance(input, list):
            # TODO: we need to check if all the elements contained in this list are torch.Tensor.
            input_dict = dict(zip(var, input))

        elif isinstance(input, dict):
            if not (set(input.keys()) >= set(var)):
                raise ValueError("Input keys are not valid.")
            input_dict = input.copy()

        else:
            raise ValueError("The type of input is not valid, got %s." % type(input))

        return input_dict

    def get_params(self, params_dict={}):
        """This method aims to get parameters of this distributions from constant parameters set in initialization
        and outputs of DNNs.

        Parameters
        ----------
        params_dict : :obj:`dict`, defaults to {}
            Input parameters.

        Returns
        -------
        output_dict : dict
            Output parameters.

        Examples
        --------
        >>> from pixyz.distributions import Normal
        >>> dist_1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...                 features_shape=[1])
        >>> print(dist_1)
        Distribution:
          p(x)
        Network architecture:
          Normal(
            name=p, distribution_name=Normal,
            var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([1])
            (loc): torch.Size([1, 1])
            (scale): torch.Size([1, 1])
          )
        >>> dist_1.get_params()
        {'loc': tensor([[0.]]), 'scale': tensor([[1.]])}

        >>> dist_2 = Normal(loc=torch.tensor(0.), scale="z", cond_var=["z"], var=["x"])
        >>> print(dist_2)
        Distribution:
          p(x|z)
        Network architecture:
          Normal(
            name=p, distribution_name=Normal,
            var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
            (loc): torch.Size([1])
          )
        >>> dist_2.get_params({"z": torch.tensor(1.)})
        {'scale': tensor(1.), 'loc': tensor([0.])}
        """
        raise NotImplementedError()

    def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True,
               reparam=False):
        """Sample variables of this distribution.
        If :attr:`cond_var` is not empty, you should set inputs as :obj:`dict`.

        Parameters
        ----------
        x_dict : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`, defaults to {}
            Input variables.
        sample_shape : :obj:`list` or :obj:`NoneType`, defaults to torch.Size()
            Shape of generating samples.
        batch_n : :obj:`int`, defaults to None.
            Set batch size of parameters.
        return_all : :obj:`bool`, defaults to True
            Choose whether the output contains input variables.
        reparam : :obj:`bool`, defaults to False.
            Choose whether we sample variables with re-parameterized trick.

        Returns
        -------
        output : dict
            Samples of this distribution.

        Examples
        --------
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...            features_shape=[10, 2])
        >>> print(p)
        Distribution:
          p(x)
        Network architecture:
          Normal(
            name=p, distribution_name=Normal,
            var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([10, 2])
            (loc): torch.Size([1, 10, 2])
            (scale): torch.Size([1, 10, 2])
          )
        >>> p.sample()["x"].shape  # (batch_n=1, features_shape)
        torch.Size([1, 10, 2])
        >>> p.sample(batch_n=20)["x"].shape  # (batch_n, features_shape)
        torch.Size([20, 10, 2])
        >>> p.sample(batch_n=20, sample_shape=[40, 30])["x"].shape  # (sample_shape, batch_n, features_shape)
        torch.Size([40, 30, 20, 10, 2])

        >>> # Conditional distribution
        >>> p = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...            features_shape=[10])
        >>> print(p)
        Distribution:
          p(x|y)
        Network architecture:
          Normal(
            name=p, distribution_name=Normal,
            var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([10])
            (scale): torch.Size([1, 10])
          )
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> sample_a = torch.randn(1, 10) # Psuedo data
        >>> sample = p.sample({"y": sample_y})
        >>> print(sample) # input_var + var  # doctest: +SKIP
        {'y': tensor([[-0.5182,  0.3484,  0.9042,  0.1914,  0.6905,
                       -1.0859, -0.4433, -0.0255,  0.8198,  0.4571]]),
         'x': tensor([[-0.7205, -1.3996,  0.5528, -0.3059,  0.5384,
                       -1.4976, -0.1480,  0.0841,0.3321,  0.5561]])}
        >>> sample = p.sample({"y": sample_y, "a": sample_a}) # Redundant input ("a")
        >>> print(sample) # input_var + var + "a" (redundant input)  # doctest: +SKIP
        {'y': tensor([[ 1.3582, -1.1151, -0.8111,  1.0630,  1.1633,
                        0.3855,  2.6324, -0.9357, -0.8649, -0.6015]]),
         'a': tensor([[-0.1874,  1.7958, -1.4084, -2.5646,  1.0868,
                       -0.7523, -0.0852, -2.4222, -0.3914, -0.9755]]),
         'x': tensor([[-0.3272, -0.5222, -1.3659,  1.8386,  2.3204,
                        0.3686,  0.6311, -1.1208,  0.3656, -0.6683]])}

        """
        raise NotImplementedError()

    def sample_mean(self, x_dict={}):
        """Return the mean of the distribution.

        Parameters
        ----------
        x_dict : :obj:`dict`, defaults to {}
            Parameters of this distribution.

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> mean = p1.sample_mean()
        >>> print(mean)
        tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> mean = p2.sample_mean({"y": sample_y})
        >>> print(mean) # doctest: +SKIP
        tensor([[-0.2189, -1.0310, -0.1917, -0.3085,  1.5190, -0.9037,  1.2559,  0.1410,
                  1.2810, -0.6681]])
        """
        raise NotImplementedError()

    def sample_variance(self, x_dict={}):
        """Return the variance of the distribution.

        Parameters
        ----------
        x_dict : :obj:`dict`, defaults to {}
            Parameters of this distribution.

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> var = p1.sample_variance()
        >>> print(var)
        tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> var = p2.sample_variance({"y": sample_y})
        >>> print(var) # doctest: +SKIP
        tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])
        """
        raise NotImplementedError()

    def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
        """Giving variables, this method returns values of log-pdf.

        Parameters
        ----------
        x_dict : dict
            Input variables.
        sum_features : :obj:`bool`, defaults to True
            Whether the output is summed across some dimensions which are specified by `feature_dims`.
        feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
            Set dimensions to sum across the output.

        Returns
        -------
        log_prob : torch.Tensor
            Values of log-probability density/mass function.

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> sample_x = torch.randn(1, 10) # Psuedo data
        >>> log_prob = p1.log_prob({"x": sample_x})
        >>> print(log_prob) # doctest: +SKIP
        tensor([-16.1153])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> log_prob = p2.log_prob({"x": sample_x, "y": sample_y})
        >>> print(log_prob) # doctest: +SKIP
        tensor([-21.5251])
        """
        raise NotImplementedError()

    def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
        """Giving variables, this method returns values of entropy.

        Parameters
        ----------
        x_dict : dict, defaults to {}
            Input variables.
        sum_features : :obj:`bool`, defaults to True
            Whether the output is summed across some dimensions which are specified by :attr:`feature_dims`.
        feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
            Set dimensions to sum across the output.

        Returns
        -------
        entropy : torch.Tensor
            Values of entropy.

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> entropy = p1.get_entropy()
        >>> print(entropy)
        tensor([14.1894])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> entropy = p2.get_entropy({"y": sample_y})
        >>> print(entropy)
        tensor([14.1894])
        """
        raise NotImplementedError()

    def log_prob(self, sum_features=True, feature_dims=None):
        """Return an instance of :class:`pixyz.losses.LogProb`.

        Parameters
        ----------
        sum_features : :obj:`bool`, defaults to True
            Whether the output is summed across some axes (dimensions) which are specified by :attr:`feature_dims`.
        feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
            Set axes to sum across the output.

        Returns
        -------
        pixyz.losses.LogProb
            An instance of :class:`pixyz.losses.LogProb`

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> sample_x = torch.randn(1, 10) # Psuedo data
        >>> log_prob = p1.log_prob().eval({"x": sample_x})
        >>> print(log_prob) # doctest: +SKIP
        tensor([-16.1153])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> log_prob = p2.log_prob().eval({"x": sample_x, "y": sample_y})
        >>> print(log_prob) # doctest: +SKIP
        tensor([-21.5251])
        """
        return LogProb(self, sum_features=sum_features, feature_dims=feature_dims)

    def prob(self, sum_features=True, feature_dims=None):
        """Return an instance of :class:`pixyz.losses.LogProb`.

        Parameters
        ----------
        sum_features : :obj:`bool`, defaults to True
            Choose whether the output is summed across some axes (dimensions)
            which are specified by :attr:`feature_dims`.
        feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
            Set dimensions to sum across the output. (Note: this parameter is not used for now.)

        Returns
        -------
        pixyz.losses.Prob
            An instance of :class:`pixyz.losses.Prob`

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> sample_x = torch.randn(1, 10) # Psuedo data
        >>> prob = p1.prob().eval({"x": sample_x})
        >>> print(prob) # doctest: +SKIP
        tensor([4.0933e-07])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> prob = p2.prob().eval({"x": sample_x, "y": sample_y})
        >>> print(prob) # doctest: +SKIP
        tensor([2.9628e-09])
        """
        return Prob(self, sum_features=sum_features, feature_dims=feature_dims)

    def forward(self, *args, **kwargs):
        """When this class is inherited by DNNs, this method should be overrided."""
        raise NotImplementedError()

    def replace_var(self, **replace_dict):
        """Return an instance of :class:`pixyz.distributions.ReplaceVarDistribution`.

        Parameters
        ----------
        replace_dict : dict
            Dictionary.

        Returns
        -------
        pixyz.distributions.ReplaceVarDistribution
            An instance of :class:`pixyz.distributions.ReplaceVarDistribution`

        """
        return ReplaceVarDistribution(self, replace_dict)

    def marginalize_var(self, marginalize_list):
        """Return an instance of :class:`pixyz.distributions.MarginalizeVarDistribution`.

        Parameters
        ----------
        marginalize_list : :obj:`list` or other
            Variables to marginalize.

        Returns
        -------
        pixyz.distributions.MarginalizeVarDistribution
            An instance of :class:`pixyz.distributions.MarginalizeVarDistribution`

        """
        marginalize_list = tolist(marginalize_list)
        return MarginalizeVarDistribution(self, marginalize_list)

    def __mul__(self, other):
        # p * q builds the joint distribution p(...)q(...).
        return MultiplyDistribution(self, other)

    def __str__(self):
        # Distribution
        text = "Distribution:\n  {}\n".format(self.prob_joint_factorized_and_text)

        # Network architecture (`repr`)
        network_text = self.__repr__()
        network_text = re.sub('^', ' ' * 2, str(network_text), flags=re.MULTILINE)
        text += "Network architecture:\n{}".format(network_text)
        return text

    def extra_repr(self):
        # parameters
        parameters_text = 'name={}, distribution_name={},\n' \
                          'var={}, cond_var={}, input_var={}, ' \
                          'features_shape={}'.format(self.name, self.distribution_name,
                                                     self.var, self.cond_var, self.input_var,
                                                     self.features_shape
                                                     )

        if len(self._buffers) != 0:
            # add buffers to repr
            buffers = ["({}): {}".format(key, value.shape) for key, value in self._buffers.items()]
            return parameters_text + "\n" + "\n".join(buffers)

        return parameters_text
class DistributionBase(Distribution):
    """Distribution class with PyTorch. In Pixyz, all distributions are required to inherit this class."""

    def __init__(self, cond_var=[], var=["x"], name="p", features_shape=torch.Size(), **kwargs):
        super().__init__(cond_var=cond_var, var=var, name=name, features_shape=features_shape)

        # Constant parameters (tensors) become buffers; string-valued ones are
        # treated as references to conditional variables (see _set_buffers).
        self._set_buffers(**kwargs)
        self._dist = None

    def _set_buffers(self, **params_dict):
        """Format constant parameters of this distribution as buffers.

        Parameters
        ----------
        params_dict : dict
            Constant parameters of this distribution set at initialization.
            If the values of these dictionaries contain parameters which are named as strings, which means that
            these parameters are set as `variables`, the correspondences between these values and the true name of
            these parameters are stored as :obj:`dict` (:attr:`replace_params_dict`).
        """
        self.replace_params_dict = {}

        for key in params_dict.keys():
            if isinstance(params_dict[key], str):
                # A string value means "take this parameter from a conditional variable".
                if params_dict[key] in self._cond_var:
                    self.replace_params_dict[params_dict[key]] = key
                else:
                    raise ValueError("Parameter `{}` is set to the variable name `{}`, "
                                     "which is not in cond_var.".format(key, params_dict[key]))
            elif isinstance(params_dict[key], torch.Tensor):
                features = params_dict[key]
                features_checked = self._check_features_shape(features)
                self.register_buffer(key, features_checked)
            else:
                raise ValueError("The value of parameter `{}` must be either str or torch.Tensor, "
                                 "got {}.".format(key, type(params_dict[key])))

    def _check_features_shape(self, features):
        """Validate a constant parameter against :attr:`features_shape` and add a batch dimension.

        A scalar tensor is expanded to `features_shape`; an empty `features_shape`
        is inferred from the given tensor.
        """
        # scalar
        if features.size() == torch.Size():
            features = features.expand(self.features_shape)

        if self.features_shape == torch.Size():
            self._features_shape = features.shape

        if features.size() == self.features_shape:
            batches = features.unsqueeze(0)
            return batches

        raise ValueError("the shape of a given parameter {} and features_shape {} "
                         "do not match.".format(features.size(), self.features_shape))

    @property
    def params_keys(self):
        """list: Return the list of parameter names for this distribution."""
        raise NotImplementedError()

    @property
    def distribution_torch_class(self):
        """Return the class of PyTorch distribution."""
        raise NotImplementedError()

    @property
    def dist(self):
        """Return the instance of PyTorch distribution."""
        return self._dist

    def set_dist(self, x_dict={}, sampling=False, batch_n=None, **kwargs):
        """Set :attr:`dist` as PyTorch distributions given parameters.

        This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set.

        Parameters
        ----------
        x_dict : :obj:`dict`, defaults to {}.
            Parameters of this distribution.
        sampling : :obj:`bool`, defaults to False.
            Choose whether to use relaxed_* in PyTorch distribution.
        batch_n : :obj:`int`, defaults to None.
            Set batch size of parameters.
        **kwargs
            Arbitrary keyword arguments.

        Returns
        -------

        """
        params = self.get_params(x_dict, **kwargs)
        if set(self.params_keys) != set(params.keys()):
            raise ValueError("Given parameters {} do not match params_keys {}."
                             .format(sorted(params.keys()), sorted(self.params_keys)))

        self._dist = self.distribution_torch_class(**params)

        # expand batch_n
        if batch_n:
            batch_shape = self._dist.batch_shape
            if batch_shape[0] == 1:
                self._dist = self._dist.expand(torch.Size([batch_n]) + batch_shape[1:])
            elif batch_shape[0] == batch_n:
                return
            else:
                raise ValueError("Batch shape {} cannot be expanded to batch_n={}."
                                 .format(batch_shape, batch_n))

    def get_sample(self, reparam=False, sample_shape=torch.Size()):
        """Get a sample_shape shaped sample from :attr:`dist`.

        Parameters
        ----------
        reparam : :obj:`bool`, defaults to True.
            Choose where to sample using re-parameterization trick.

        sample_shape : :obj:`tuple` or :obj:`torch.Size`, defaults to torch.Size().
            Set the shape of a generated sample.

        Returns
        -------
        samples_dict : dict
            Generated sample formatted by :obj:`dict`.

        """
        if reparam:
            try:
                _samples = self.dist.rsample(sample_shape=sample_shape)
            except NotImplementedError as e:
                # Bug fix: the original wrote `except NotImplementedError():`,
                # which tries to match against an exception *instance* and
                # raises TypeError instead of handling the missing rsample.
                raise ValueError("You cannot use the re-parameterization trick "
                                 "for this distribution.") from e
        else:
            _samples = self.dist.sample(sample_shape=sample_shape)
        samples_dict = {self._var[0]: _samples}

        return samples_dict

    def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
        # Build the underlying torch distribution from conditional variables,
        # then evaluate the log-pdf at the target variables.
        _x_dict = get_dict_values(x_dict, self._cond_var, return_dict=True)
        self.set_dist(_x_dict, sampling=False)

        x_targets = get_dict_values(x_dict, self._var)
        log_prob = self.dist.log_prob(*x_targets)
        if sum_features:
            log_prob = sum_samples(log_prob)

        return log_prob

    def get_params(self, params_dict={}):
        # Split inputs into direct parameters and variables that must be mapped
        # back to parameter names via replace_params_dict.
        params_dict, vars_dict = replace_dict_keys_split(params_dict, self.replace_params_dict)
        output_dict = self.forward(**vars_dict)

        output_dict.update(params_dict)

        # append constant parameters to output_dict
        constant_params_dict = get_dict_values(dict(self.named_buffers()), self.params_keys, return_dict=True)
        output_dict.update(constant_params_dict)

        return output_dict

    def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
        _x_dict = get_dict_values(x_dict, self._cond_var, return_dict=True)
        self.set_dist(_x_dict, sampling=False)

        entropy = self.dist.entropy()
        if sum_features:
            entropy = sum_samples(entropy)

        return entropy

    def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False):
        # check whether the input is valid or convert it to valid dictionary.
        x_dict = self._check_input(x_dict)

        input_dict = {}

        # conditioned
        if len(self.input_var) != 0:
            input_dict.update(get_dict_values(x_dict, self.input_var, return_dict=True))

        self.set_dist(input_dict, batch_n=batch_n)
        output_dict = self.get_sample(reparam=reparam,
                                      sample_shape=sample_shape)

        if return_all:
            x_dict.update(output_dict)
            return x_dict

        return output_dict

    def sample_mean(self, x_dict={}):
        self.set_dist(x_dict)
        return self.dist.mean

    def sample_variance(self, x_dict={}):
        self.set_dist(x_dict)
        return self.dist.variance

    def forward(self, **params):
        # Identity by default; subclasses with DNNs override this to map
        # conditional variables to distribution parameters.
        return params
class MultiplyDistribution(Distribution):
"""Multiply by given distributions, e.g, :math:`p(x,y|z) = p(x|z,y)p(y|z)`.
In this class, it is checked if two distributions can be multiplied.
p(x|z)p(z|y) -> Valid
p(x|z)p(y|z) -> Valid
p(x|z)p(y|a) -> Valid
p(x|z)p(z|x) -> Invalid (recursive)
p(x|z)p(x|y) -> Invalid (conflict)
Examples
--------
>>> a = DistributionBase(var=["x"], cond_var=["z"])
>>> b = DistributionBase(var=["z"], cond_var=["y"])
>>> p_multi = MultiplyDistribution(a, b)
>>> print(p_multi)
Distribution:
p(x,z|y) = p(x|z)p(z|y)
Network architecture:
DistributionBase(
name=p, distribution_name=,
var=['z'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([])
)
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> b = DistributionBase(var=["y"], cond_var=["z"])
>>> p_multi = MultiplyDistribution(a, b)
>>> print(p_multi)
Distribution:
p(x,y|z) = p(x|z)p(y|z)
Network architecture:
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> b = DistributionBase(var=["y"], cond_var=["a"])
>>> p_multi = MultiplyDistribution(a, b)
>>> print(p_multi)
Distribution:
p(x,y|z,a) = p(x|z)p(y|a)
Network architecture:
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['a'], input_var=['a'], features_shape=torch.Size([])
)
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
"""
def __init__(self, a, b):
"""
Parameters
----------
a : pixyz.Distribution
Distribution.
b : pixyz.Distribution
Distribution.
"""
if not (isinstance(a, Distribution) and isinstance(b, Distribution)):
raise ValueError("Given inputs should be `pixyz.Distribution`, got {} and {}.".format(type(a), type(b)))
# Check parent-child relationship between two distributions.
# If inherited variables (`_inh_var`) are exist (e.g. c in p(e|c)p(c|a,b)),
# then p(e|c) is a child and p(c|a,b) is a parent, otherwise it is opposite.
_vars_a_b = a.cond_var + b.var
_vars_b_a = b.cond_var + a.var
_inh_var_a_b = [var for var in set(_vars_a_b) if _vars_a_b.count(var) > 1]
_inh_var_b_a = [var for var in set(_vars_b_a) if _vars_b_a.count(var) > 1]
if len(_inh_var_a_b) > 0:
_child = a
_parent = b
_inh_var = _inh_var_a_b
elif len(_inh_var_b_a) > 0:
_child = b
_parent = a
_inh_var = _inh_var_b_a
else:
_child = a
_parent = b
_inh_var = []
# Check if variables of two distributions are "recursive" (e.g. p(x|z)p(z|x)).
_check_recursive_vars = _child.var + _parent.cond_var
if len(_check_recursive_vars) != len(set(_check_recursive_vars)):
raise ValueError("Variables of two distributions, {} and {}, are recursive.".format(_child.prob_text,
_parent.prob_text))
# Set variables.
_var = _child.var + _parent.var
if len(_var) != len(set(_var)): # e.g. p(x|z)p(x|y)
raise ValueError("Variables of two distributions, {} and {}, are conflicted.".format(_child.prob_text,
_parent.prob_text))
# Set conditional variables.
_cond_var = _child.cond_var + _parent.cond_var
_cond_var = sorted(set(_cond_var), key=_cond_var.index)
# Delete inh_var in conditional variables.
_cond_var = [var for var in _cond_var if var not in _inh_var]
super().__init__(cond_var=_cond_var, var=_var)
self._parent = _parent
self._child = _child
# Set input_var (it might be different from cond_var if either a and b contain data distributions.)
_input_var = [var for var in self._child.input_var if var not in _inh_var]
_input_var += self._parent.input_var
self._input_var = sorted(set(_input_var), key=_input_var.index)
@property
def input_var(self):
    """list: Input variables of this distribution.

    Built in ``__init__`` from the child's input variables (minus the
    inherited variables) plus the parent's input variables.
    """
    return self._input_var
@property
def prob_factorized_text(self):
    """str: Formula of the factorized distribution, child factor first
    (e.g. ``p(x|z)p(z|y)``)."""
    return self._child.prob_factorized_text + self._parent.prob_factorized_text
def sample(self, x_dict={}, batch_n=None, return_all=True, reparam=False, **kwargs):
    """Perform ancestral sampling: sample the parent factor first, then the
    child factor conditioned on the parent's samples.

    Parameters
    ----------
    x_dict : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`, defaults to {}
        Input variables (must contain this distribution's conditional variables).
    batch_n : :obj:`int`, defaults to None
        Batch size of parameters.
    return_all : :obj:`bool`, defaults to True
        Choose whether the output contains input variables.
    reparam : :obj:`bool`, defaults to False
        Choose whether to sample variables with the re-parameterization trick.

    Returns
    -------
    output_dict : dict
        Samples of this distribution.
    """
    # Sample from the parent distribution. return_all=True so that the
    # inherited variables are present in the dictionary passed to the child.
    child_x_dict = self._parent.sample(x_dict=x_dict, batch_n=batch_n,
                                       return_all=True, reparam=reparam)
    # Sample from the child distribution given the parent's samples.
    output_dict = self._child.sample(x_dict=child_x_dict, batch_n=batch_n,
                                     return_all=True, reparam=reparam)

    if not return_all:
        # Keep only this distribution's own variables.
        output_dict = get_dict_values(output_dict, self._var, return_dict=True)

    return output_dict
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
    """Return the log-probability of the product, i.e. the sum of the
    parent-factor and child-factor log-probabilities."""
    log_p_parent = self._parent.get_log_prob(
        x_dict, sum_features=sum_features, feature_dims=feature_dims)
    log_p_child = self._child.get_log_prob(
        x_dict, sum_features=sum_features, feature_dims=feature_dims)

    # Summed values are always scalars per sample; otherwise the two factors
    # must share the same shape to be added element-wise.
    if sum_features or log_p_parent.size() == log_p_child.size():
        return log_p_parent + log_p_child

    raise ValueError("Two PDFs, {} and {}, have different sizes,"
                     " so you must set sum_dim=True.".format(self._parent.prob_text, self._child.prob_text))
def __repr__(self):
    # Show both factors, parent first on its own line, child below.
    return "\n".join((self._parent.__repr__(), self._child.__repr__()))
class ReplaceVarDistribution(Distribution):
    """Replace names of variables in Distribution.

    Examples
    --------
    >>> p = DistributionBase(var=["x"], cond_var=["z"])
    >>> print(p)
    Distribution:
      p(x|z)
    Network architecture:
      DistributionBase(
        name=p, distribution_name=,
        var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
    >>> replace_dict = {'x': 'y'}
    >>> p_repl = ReplaceVarDistribution(p, replace_dict)
    >>> print(p_repl)
    Distribution:
      p(y|z)
    Network architecture:
      ReplaceVarDistribution(
        name=p, distribution_name=,
        var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
        (p): DistributionBase(
          name=p, distribution_name=,
          var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
        )
      )
    """

    def __init__(self, p, replace_dict):
        """
        Parameters
        ----------
        p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.MultiplyDistribution`)
            Distribution.
        replace_dict : dict
            Dictionary mapping original variable names to their new names.
        """
        if not isinstance(p, Distribution):
            raise ValueError("Given input should be `pixyz.Distribution`, got {}.".format(type(p)))

        if isinstance(p, MultiplyDistribution):
            raise ValueError("`pixyz.MultiplyDistribution` is not supported for now.")

        if isinstance(p, MarginalizeVarDistribution):
            raise ValueError("`pixyz.MarginalizeVarDistribution` is not supported for now.")

        _cond_var = deepcopy(p.cond_var)
        _var = deepcopy(p.var)
        all_vars = _cond_var + _var

        if not (set(replace_dict.keys()) <= set(all_vars)):
            # Every key to be replaced must actually be a variable of `p`.
            raise ValueError("Keys of `replace_dict` must be variables of the given distribution {},"
                             " got {}.".format(all_vars, list(replace_dict.keys())))

        # Inverse mappings used to translate incoming dictionaries back to the
        # wrapped distribution's original variable names.
        _replace_inv_cond_var_dict = {replace_dict[var]: var for var in _cond_var if var in replace_dict.keys()}
        _replace_inv_dict = {value: key for key, value in replace_dict.items()}

        self._replace_inv_cond_var_dict = _replace_inv_cond_var_dict
        self._replace_inv_dict = _replace_inv_dict
        self._replace_dict = replace_dict

        _cond_var = [replace_dict[var] if var in replace_dict.keys() else var for var in _cond_var]
        _var = [replace_dict[var] if var in replace_dict.keys() else var for var in _var]
        super().__init__(cond_var=_cond_var, var=_var, name=p.name, features_shape=p.features_shape)

        self.p = p
        _input_var = [replace_dict[var] if var in replace_dict.keys() else var for var in p.input_var]
        self._input_var = _input_var

    def forward(self, *args, **kwargs):
        """Delegate the network forward pass to the wrapped distribution."""
        return self.p.forward(*args, **kwargs)

    def get_params(self, params_dict={}):
        """Return parameters of the wrapped distribution, translating keys back
        to its original conditional-variable names first."""
        params_dict = replace_dict_keys(params_dict, self._replace_inv_cond_var_dict)
        return self.p.get_params(params_dict)

    def set_dist(self, x_dict={}, sampling=False, batch_n=None, **kwargs):
        """Set the wrapped distribution's PyTorch distribution, translating keys
        back to its original conditional-variable names first."""
        x_dict = replace_dict_keys(x_dict, self._replace_inv_cond_var_dict)
        return self.p.set_dist(x_dict=x_dict, sampling=sampling, batch_n=batch_n, **kwargs)

    def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False, **kwargs):
        """Sample from the wrapped distribution and rename the sampled variables."""
        input_dict = get_dict_values(x_dict, self.cond_var, return_dict=True)
        replaced_input_dict = replace_dict_keys(input_dict, self._replace_inv_cond_var_dict)

        output_dict = self.p.sample(replaced_input_dict, batch_n=batch_n, sample_shape=sample_shape,
                                    return_all=False, reparam=reparam, **kwargs)
        # Rename the wrapped distribution's outputs to this distribution's variable names.
        output_dict = replace_dict_keys(output_dict, self._replace_dict)

        x_dict.update(output_dict)
        return x_dict

    def get_log_prob(self, x_dict, **kwargs):
        """Return the log-probability, translating variable names back to the
        wrapped distribution's originals first."""
        input_dict = get_dict_values(x_dict, self.cond_var + self.var, return_dict=True)
        input_dict = replace_dict_keys(input_dict, self._replace_inv_dict)
        return self.p.get_log_prob(input_dict, **kwargs)

    def sample_mean(self, x_dict={}):
        """Return the mean of the wrapped distribution given renamed inputs."""
        input_dict = get_dict_values(x_dict, self.cond_var, return_dict=True)
        input_dict = replace_dict_keys(input_dict, self._replace_inv_cond_var_dict)
        return self.p.sample_mean(input_dict)

    def sample_variance(self, x_dict={}):
        """Return the variance of the wrapped distribution given renamed inputs."""
        input_dict = get_dict_values(x_dict, self.cond_var, return_dict=True)
        input_dict = replace_dict_keys(input_dict, self._replace_inv_cond_var_dict)
        return self.p.sample_variance(input_dict)

    @property
    def input_var(self):
        """list: Input variables of this distribution (already renamed)."""
        return self._input_var

    @property
    def distribution_name(self):
        """str: Name of the wrapped distribution class."""
        return self.p.distribution_name

    def __getattr__(self, item):
        # Fall back to the wrapped distribution for attributes that neither
        # this wrapper nor nn.Module provides.
        try:
            return super().__getattr__(item)
        except AttributeError:
            return self.p.__getattribute__(item)
class MarginalizeVarDistribution(Distribution):
    r"""Marginalize variables in Distribution.

    .. math::
        p(x) = \int p(x,z) dz

    Examples
    --------
    >>> a = DistributionBase(var=["x"], cond_var=["z"])
    >>> b = DistributionBase(var=["y"], cond_var=["z"])
    >>> p_multi = a * b
    >>> print(p_multi)
    Distribution:
      p(x,y|z) = p(x|z)p(y|z)
    Network architecture:
      DistributionBase(
        name=p, distribution_name=,
        var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
      DistributionBase(
        name=p, distribution_name=,
        var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
    >>> p_marg = MarginalizeVarDistribution(p_multi, ["y"])
    >>> print(p_marg)
    Distribution:
      p(x|z) = \int p(x|z)p(y|z)dy
    Network architecture:
      DistributionBase(
        name=p, distribution_name=,
        var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
      DistributionBase(
        name=p, distribution_name=,
        var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
    """

    def __init__(self, p, marginalize_list):
        """
        Parameters
        ----------
        p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.DistributionBase`)
            Distribution.
        marginalize_list : list
            Variables to marginalize.
        """
        marginalize_list = tolist(marginalize_list)

        if not isinstance(p, Distribution):
            raise ValueError("Given input must be `pixyz.distributions.Distribution`, got {}.".format(type(p)))

        if isinstance(p, DistributionBase):
            raise ValueError("`pixyz.distributions.DistributionBase` cannot be marginalized its variables.")

        _var = deepcopy(p.var)
        _cond_var = deepcopy(p.cond_var)

        if not ((set(marginalize_list)) < set(_var)):
            # Must be a *proper* subset: marginalizing all variables would
            # leave nothing for this distribution to model.
            raise ValueError("`marginalize_list` ({}) must be a proper subset of the variables ({}) of the"
                             " given distribution.".format(marginalize_list, _var))

        if not ((set(marginalize_list)).isdisjoint(set(_cond_var))):
            raise ValueError("`marginalize_list` ({}) must not contain any conditional variables"
                             " ({}).".format(marginalize_list, _cond_var))

        if len(marginalize_list) == 0:
            raise ValueError("Length of `marginalize_list` must be at least 1, got 0.")

        _var = [var for var in _var if var not in marginalize_list]

        super().__init__(cond_var=_cond_var, var=_var, name=p.name, features_shape=p.features_shape)
        self.p = p
        self._marginalize_list = marginalize_list

    def forward(self, *args, **kwargs):
        """Delegate the network forward pass to the wrapped distribution."""
        return self.p.forward(*args, **kwargs)

    def get_params(self, params_dict={}):
        """Return parameters of the wrapped distribution."""
        return self.p.get_params(params_dict)

    def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False, **kwargs):
        """Sample from the wrapped distribution, then drop marginalized variables."""
        output_dict = self.p.sample(x_dict=x_dict, batch_n=batch_n, sample_shape=sample_shape, return_all=return_all,
                                    reparam=reparam, **kwargs)
        # Marginalization by ancestral sampling: simply discard the samples of
        # the marginalized variables.
        output_dict = delete_dict_values(output_dict, self._marginalize_list)

        return output_dict

    def sample_mean(self, x_dict={}):
        """Return the mean of the wrapped distribution."""
        return self.p.sample_mean(x_dict)

    def sample_variance(self, x_dict={}):
        """Return the variance of the wrapped distribution."""
        return self.p.sample_variance(x_dict)

    @property
    def input_var(self):
        """list: Input variables of the wrapped distribution."""
        return self.p.input_var

    @property
    def distribution_name(self):
        """str: Name of the wrapped distribution class."""
        return self.p.distribution_name

    @property
    def prob_factorized_text(self):
        """str: Factorized formula with one integral sign per marginalized variable."""
        integral_symbol = len(self._marginalize_list) * "\\int "
        integral_variables = ["d" + str(var) for var in self._marginalize_list]
        integral_variables = "".join(integral_variables)

        return "{}{}{}".format(integral_symbol, self.p.prob_factorized_text, integral_variables)

    def __repr__(self):
        return self.p.__repr__()

    def __getattr__(self, item):
        # Fall back to the wrapped distribution for attributes that neither
        # this wrapper nor nn.Module provides.
        try:
            return super().__getattr__(item)
        except AttributeError:
            return self.p.__getattribute__(item)
| from __future__ import print_function
import torch
import re
from torch import nn
from copy import deepcopy
from ..utils import get_dict_values, replace_dict_keys, replace_dict_keys_split, delete_dict_values,\
tolist, sum_samples, convert_latex_name
from ..losses import LogProb, Prob
class Distribution(nn.Module):
    """Distribution class. In Pixyz, all distributions are required to inherit this class.

    Examples
    --------
    >>> import torch
    >>> from torch.nn import functional as F
    >>> from pixyz.distributions import Normal
    >>> # Marginal distribution
    >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
    ...             features_shape=[64], name="p1")
    >>> print(p1)
    Distribution:
      p_{1}(x)
    Network architecture:
      Normal(
        name=p_{1}, distribution_name=Normal,
        var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([64])
        (loc): torch.Size([1, 64])
        (scale): torch.Size([1, 64])
      )
    >>> # Conditional distribution
    >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
    ...             features_shape=[64], name="p2")
    >>> print(p2)
    Distribution:
      p_{2}(x|y)
    Network architecture:
      Normal(
        name=p_{2}, distribution_name=Normal,
        var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([64])
        (scale): torch.Size([1, 64])
      )
    >>> # Conditional distribution (by neural networks)
    >>> class P(Normal):
    ...     def __init__(self):
    ...         super().__init__(var=["x"], cond_var=["y"], name="p3")
    ...         self.model_loc = nn.Linear(128, 64)
    ...         self.model_scale = nn.Linear(128, 64)
    ...     def forward(self, y):
    ...         return {"loc": self.model_loc(y), "scale": F.softplus(self.model_scale(y))}
    >>> p3 = P()
    >>> print(p3)
    Distribution:
      p_{3}(x|y)
    Network architecture:
      P(
        name=p_{3}, distribution_name=Normal,
        var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([])
        (model_loc): Linear(in_features=128, out_features=64, bias=True)
        (model_scale): Linear(in_features=128, out_features=64, bias=True)
      )
    """

    def __init__(self, var, cond_var=[], name="p", features_shape=torch.Size()):
        """
        Parameters
        ----------
        var : :obj:`list` of :obj:`str`
            Variables of this distribution.
        cond_var : :obj:`list` of :obj:`str`, defaults to []
            Conditional variables of this distribution.
            In case that cond_var is not empty, we must set the corresponding inputs to sample variables.
        name : :obj:`str`, defaults to "p"
            Name of this distribution.
            This name is displayed in :attr:`prob_text` and :attr:`prob_factorized_text`.
        features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size())
            Shape of dimensions (features) of this distribution.
        """
        super().__init__()

        # A variable may not appear both as a variable and a conditional variable.
        _vars = cond_var + var
        if len(_vars) != len(set(_vars)):
            raise ValueError("There are conflicted variables.")

        self._cond_var = cond_var
        self._var = var
        self._features_shape = torch.Size(features_shape)
        self._name = convert_latex_name(name)

        self._prob_text = None
        self._prob_factorized_text = None

    @property
    def distribution_name(self):
        """str: Name of this distribution class."""
        return ""

    @property
    def name(self):
        """str: Name of this distribution displayed in :obj:`prob_text` and :obj:`prob_factorized_text`."""
        return self._name

    @name.setter
    def name(self, name):
        if type(name) is str:
            self._name = name
            return

        raise ValueError("Name of the distribution class must be a string type.")

    @property
    def var(self):
        """list: Variables of this distribution."""
        return self._var

    @property
    def cond_var(self):
        """list: Conditional variables of this distribution."""
        return self._cond_var

    @property
    def input_var(self):
        """list: Input variables of this distribution.

        Normally, it has same values as :attr:`cond_var`.
        """
        return self._cond_var

    @property
    def prob_text(self):
        """str: Return a formula of the (joint) probability distribution."""
        _var_text = [','.join([convert_latex_name(var_name) for var_name in self._var])]
        if len(self._cond_var) != 0:
            _var_text += [','.join([convert_latex_name(var_name) for var_name in self._cond_var])]

        # e.g. "p(x,z|y)" -- the "|" only appears when there are conditional variables.
        _prob_text = "{}({})".format(
            self._name,
            "|".join(_var_text)
        )

        return _prob_text

    @property
    def prob_factorized_text(self):
        """str: Return a formula of the factorized probability distribution."""
        return self.prob_text

    @property
    def prob_joint_factorized_and_text(self):
        """str: Return a formula of the factorized and the (joint) probability distributions."""
        if self.prob_factorized_text == self.prob_text:
            prob_text = self.prob_text
        else:
            prob_text = "{} = {}".format(self.prob_text, self.prob_factorized_text)

        return prob_text

    @property
    def features_shape(self):
        """torch.Size or list: Shape of features of this distribution."""
        return self._features_shape

    def _check_input(self, input, var=None):
        """Check the type of given input.

        If the input type is :obj:`dict`, this method checks whether the input keys contains the :attr:`var` list.
        In case that its type is :obj:`list` or :obj:`tensor`, it returns the output formatted in :obj:`dict`.

        Parameters
        ----------
        input : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`
            Input variables.
        var : :obj:`list` or :obj:`NoneType`, defaults to None
            Variables to check if given input contains them.
            This is set to None by default.

        Returns
        -------
        input_dict : dict
            Variables checked in this method.

        Raises
        ------
        ValueError
            Raises `ValueError` if the type of input is neither :obj:`torch.Tensor`, :obj:`list`, nor :obj:`dict`.
        """
        if var is None:
            var = self.input_var

        if type(input) is torch.Tensor:
            # A single tensor is assigned to the first input variable.
            input_dict = {var[0]: input}

        elif type(input) is list:
            # TODO: we need to check if all the elements contained in this list are torch.Tensor.
            input_dict = dict(zip(var, input))

        elif type(input) is dict:
            # Superset is allowed: extra (redundant) keys are kept as-is.
            if not (set(list(input.keys())) >= set(var)):
                raise ValueError("Input keys are not valid.")
            input_dict = input.copy()

        else:
            raise ValueError("The type of input is not valid, got %s." % type(input))

        return input_dict

    def get_params(self, params_dict={}):
        """This method aims to get parameters of this distributions from constant parameters set in initialization
        and outputs of DNNs.

        Parameters
        ----------
        params_dict : :obj:`dict`, defaults to {}
            Input parameters.

        Returns
        -------
        output_dict : dict
            Output parameters.

        Examples
        --------
        >>> from pixyz.distributions import Normal
        >>> dist_1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...                 features_shape=[1])
        >>> print(dist_1)
        Distribution:
          p(x)
        Network architecture:
          Normal(
            name=p, distribution_name=Normal,
            var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([1])
            (loc): torch.Size([1, 1])
            (scale): torch.Size([1, 1])
          )
        >>> dist_1.get_params()
        {'loc': tensor([[0.]]), 'scale': tensor([[1.]])}

        >>> dist_2 = Normal(loc=torch.tensor(0.), scale="z", cond_var=["z"], var=["x"])
        >>> print(dist_2)
        Distribution:
          p(x|z)
        Network architecture:
          Normal(
            name=p, distribution_name=Normal,
            var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
            (loc): torch.Size([1])
          )
        >>> dist_2.get_params({"z": torch.tensor(1.)})
        {'scale': tensor(1.), 'loc': tensor([0.])}
        """
        raise NotImplementedError()

    def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True,
               reparam=False):
        """Sample variables of this distribution.

        If :attr:`cond_var` is not empty, you should set inputs as :obj:`dict`.

        Parameters
        ----------
        x_dict : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`, defaults to {}
            Input variables.
        sample_shape : :obj:`list` or :obj:`NoneType`, defaults to torch.Size()
            Shape of generating samples.
        batch_n : :obj:`int`, defaults to None.
            Set batch size of parameters.
        return_all : :obj:`bool`, defaults to True
            Choose whether the output contains input variables.
        reparam : :obj:`bool`, defaults to False.
            Choose whether we sample variables with re-parameterized trick.

        Returns
        -------
        output : dict
            Samples of this distribution.

        Examples
        --------
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...            features_shape=[10, 2])
        >>> print(p)
        Distribution:
          p(x)
        Network architecture:
          Normal(
            name=p, distribution_name=Normal,
            var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([10, 2])
            (loc): torch.Size([1, 10, 2])
            (scale): torch.Size([1, 10, 2])
          )
        >>> p.sample()["x"].shape  # (batch_n=1, features_shape)
        torch.Size([1, 10, 2])
        >>> p.sample(batch_n=20)["x"].shape  # (batch_n, features_shape)
        torch.Size([20, 10, 2])
        >>> p.sample(batch_n=20, sample_shape=[40, 30])["x"].shape  # (sample_shape, batch_n, features_shape)
        torch.Size([40, 30, 20, 10, 2])

        >>> # Conditional distribution
        >>> p = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...            features_shape=[10])
        >>> print(p)
        Distribution:
          p(x|y)
        Network architecture:
          Normal(
            name=p, distribution_name=Normal,
            var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([10])
            (scale): torch.Size([1, 10])
          )
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> sample_a = torch.randn(1, 10) # Psuedo data
        >>> sample = p.sample({"y": sample_y})
        >>> print(sample) # input_var + var  # doctest: +SKIP
        {'y': tensor([[-0.5182,  0.3484,  0.9042,  0.1914,  0.6905,
                       -1.0859, -0.4433, -0.0255,  0.8198,  0.4571]]),
         'x': tensor([[-0.7205, -1.3996,  0.5528, -0.3059,  0.5384,
                       -1.4976, -0.1480,  0.0841,0.3321,  0.5561]])}
        >>> sample = p.sample({"y": sample_y, "a": sample_a}) # Redundant input ("a")
        >>> print(sample) # input_var + var + "a" (redundant input)  # doctest: +SKIP
        {'y': tensor([[ 1.3582, -1.1151, -0.8111,  1.0630,  1.1633,
                        0.3855,  2.6324, -0.9357, -0.8649, -0.6015]]),
         'a': tensor([[-0.1874,  1.7958, -1.4084, -2.5646,  1.0868,
                       -0.7523, -0.0852, -2.4222, -0.3914, -0.9755]]),
         'x': tensor([[-0.3272, -0.5222, -1.3659,  1.8386,  2.3204,
                        0.3686,  0.6311, -1.1208,  0.3656, -0.6683]])}
        """
        raise NotImplementedError()

    def sample_mean(self, x_dict={}):
        """Return the mean of the distribution.

        Parameters
        ----------
        x_dict : :obj:`dict`, defaults to {}
            Parameters of this distribution.

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> mean = p1.sample_mean()
        >>> print(mean)
        tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> mean = p2.sample_mean({"y": sample_y})
        >>> print(mean) # doctest: +SKIP
        tensor([[-0.2189, -1.0310, -0.1917, -0.3085,  1.5190, -0.9037,  1.2559,  0.1410,
                  1.2810, -0.6681]])
        """
        raise NotImplementedError()

    def sample_variance(self, x_dict={}):
        """Return the variance of the distribution.

        Parameters
        ----------
        x_dict : :obj:`dict`, defaults to {}
            Parameters of this distribution.

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> var = p1.sample_variance()
        >>> print(var)
        tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> var = p2.sample_variance({"y": sample_y})
        >>> print(var) # doctest: +SKIP
        tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])
        """
        raise NotImplementedError()

    def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
        """Giving variables, this method returns values of log-pdf.

        Parameters
        ----------
        x_dict : dict
            Input variables.
        sum_features : :obj:`bool`, defaults to True
            Whether the output is summed across some dimensions which are specified by `feature_dims`.
        feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
            Set dimensions to sum across the output.

        Returns
        -------
        log_prob : torch.Tensor
            Values of log-probability density/mass function.

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> sample_x = torch.randn(1, 10) # Psuedo data
        >>> log_prob = p1.log_prob({"x": sample_x})
        >>> print(log_prob) # doctest: +SKIP
        tensor([-16.1153])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> log_prob = p2.log_prob({"x": sample_x, "y": sample_y})
        >>> print(log_prob) # doctest: +SKIP
        tensor([-21.5251])
        """
        raise NotImplementedError()

    def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
        """Giving variables, this method returns values of entropy.

        Parameters
        ----------
        x_dict : dict, defaults to {}
            Input variables.
        sum_features : :obj:`bool`, defaults to True
            Whether the output is summed across some dimensions which are specified by :attr:`feature_dims`.
        feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
            Set dimensions to sum across the output.

        Returns
        -------
        entropy : torch.Tensor
            Values of entropy.

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> entropy = p1.get_entropy()
        >>> print(entropy)
        tensor([14.1894])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> entropy = p2.get_entropy({"y": sample_y})
        >>> print(entropy)
        tensor([14.1894])
        """
        raise NotImplementedError()

    def log_prob(self, sum_features=True, feature_dims=None):
        """Return an instance of :class:`pixyz.losses.LogProb`.

        Parameters
        ----------
        sum_features : :obj:`bool`, defaults to True
            Whether the output is summed across some axes (dimensions) which are specified by :attr:`feature_dims`.
        feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
            Set axes to sum across the output.

        Returns
        -------
        pixyz.losses.LogProb
            An instance of :class:`pixyz.losses.LogProb`

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> sample_x = torch.randn(1, 10) # Psuedo data
        >>> log_prob = p1.log_prob().eval({"x": sample_x})
        >>> print(log_prob) # doctest: +SKIP
        tensor([-16.1153])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> log_prob = p2.log_prob().eval({"x": sample_x, "y": sample_y})
        >>> print(log_prob) # doctest: +SKIP
        tensor([-21.5251])
        """
        return LogProb(self, sum_features=sum_features, feature_dims=feature_dims)

    def prob(self, sum_features=True, feature_dims=None):
        """Return an instance of :class:`pixyz.losses.LogProb`.

        Parameters
        ----------
        sum_features : :obj:`bool`, defaults to True
            Choose whether the output is summed across some axes (dimensions)
            which are specified by :attr:`feature_dims`.
        feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
            Set dimensions to sum across the output. (Note: this parameter is not used for now.)

        Returns
        -------
        pixyz.losses.Prob
            An instance of :class:`pixyz.losses.Prob`

        Examples
        --------
        >>> import torch
        >>> from pixyz.distributions import Normal
        >>> # Marginal distribution
        >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
        ...             features_shape=[10], name="p1")
        >>> sample_x = torch.randn(1, 10) # Psuedo data
        >>> prob = p1.prob().eval({"x": sample_x})
        >>> print(prob) # doctest: +SKIP
        tensor([4.0933e-07])

        >>> # Conditional distribution
        >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
        ...             features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Psuedo data
        >>> prob = p2.prob().eval({"x": sample_x, "y": sample_y})
        >>> print(prob) # doctest: +SKIP
        tensor([2.9628e-09])
        """
        return Prob(self, sum_features=sum_features, feature_dims=feature_dims)

    def forward(self, *args, **kwargs):
        """When this class is inherited by DNNs, this method should be overrided."""
        raise NotImplementedError()

    def replace_var(self, **replace_dict):
        """Return an instance of :class:`pixyz.distributions.ReplaceVarDistribution`.

        Parameters
        ----------
        replace_dict : dict
            Dictionary.

        Returns
        -------
        pixyz.distributions.ReplaceVarDistribution
            An instance of :class:`pixyz.distributions.ReplaceVarDistribution`
        """
        return ReplaceVarDistribution(self, replace_dict)

    def marginalize_var(self, marginalize_list):
        """Return an instance of :class:`pixyz.distributions.MarginalizeVarDistribution`.

        Parameters
        ----------
        marginalize_list : :obj:`list` or other
            Variables to marginalize.

        Returns
        -------
        pixyz.distributions.MarginalizeVarDistribution
            An instance of :class:`pixyz.distributions.MarginalizeVarDistribution`
        """
        marginalize_list = tolist(marginalize_list)
        return MarginalizeVarDistribution(self, marginalize_list)

    def __mul__(self, other):
        # `p * q` builds the product distribution p(...)q(...).
        return MultiplyDistribution(self, other)

    def __str__(self):
        # Distribution
        text = "Distribution:\n  {}\n".format(self.prob_joint_factorized_and_text)

        # Network architecture (`repr`), indented by two spaces on every line.
        network_text = self.__repr__()
        network_text = re.sub('^', ' ' * 2, str(network_text), flags=re.MULTILINE)
        text += "Network architecture:\n{}".format(network_text)
        return text

    def extra_repr(self):
        # parameters
        parameters_text = 'name={}, distribution_name={},\n' \
                          'var={}, cond_var={}, input_var={}, ' \
                          'features_shape={}'.format(self.name, self.distribution_name,
                                                     self.var, self.cond_var, self.input_var,
                                                     self.features_shape
                                                     )

        if len(self._buffers) != 0:
            # add buffers to repr
            buffers = ["({}): {}".format(key, value.shape) for key, value in self._buffers.items()]
            return parameters_text + "\n" + "\n".join(buffers)

        return parameters_text
class DistributionBase(Distribution):
"""Distribution class with PyTorch. In Pixyz, all distributions are required to inherit this class."""
def __init__(self, cond_var=[], var=["x"], name="p", features_shape=torch.Size(), **kwargs):
super().__init__(cond_var=cond_var, var=var, name=name, features_shape=features_shape)
self._set_buffers(**kwargs)
self._dist = None
def _set_buffers(self, **params_dict):
"""Format constant parameters of this distribution as buffers.
Parameters
----------
params_dict : dict
Constant parameters of this distribution set at initialization.
If the values of these dictionaries contain parameters which are named as strings, which means that
these parameters are set as `variables`, the correspondences between these values and the true name of
these parameters are stored as :obj:`dict` (:attr:`replace_params_dict`).
"""
self.replace_params_dict = {}
for key in params_dict.keys():
if type(params_dict[key]) is str:
if params_dict[key] in self._cond_var:
self.replace_params_dict[params_dict[key]] = key
else:
raise ValueError()
elif isinstance(params_dict[key], torch.Tensor):
features = params_dict[key]
features_checked = self._check_features_shape(features)
self.register_buffer(key, features_checked)
else:
raise ValueError()
def _check_features_shape(self, features):
# scalar
if features.size() == torch.Size():
features = features.expand(self.features_shape)
if self.features_shape == torch.Size():
self._features_shape = features.shape
if features.size() == self.features_shape:
batches = features.unsqueeze(0)
return batches
raise ValueError("the shape of a given parameter {} and features_shape {} "
"do not match.".format(features.size(), self.features_shape))
@property
def params_keys(self):
"""list: Return the list of parameter names for this distribution."""
raise NotImplementedError()
@property
def distribution_torch_class(self):
"""Return the class of PyTorch distribution."""
raise NotImplementedError()
@property
def dist(self):
"""Return the instance of PyTorch distribution."""
return self._dist
def set_dist(self, x_dict={}, sampling=False, batch_n=None, **kwargs):
"""Set :attr:`dist` as PyTorch distributions given parameters.
This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}.
Parameters of this distribution.
sampling : :obj:`bool`, defaults to False.
Choose whether to use relaxed_* in PyTorch distribution.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
params = self.get_params(x_dict, **kwargs)
if set(self.params_keys) != set(params.keys()):
raise ValueError()
self._dist = self.distribution_torch_class(**params)
# expand batch_n
if batch_n:
batch_shape = self._dist.batch_shape
if batch_shape[0] == 1:
self._dist = self._dist.expand(torch.Size([batch_n]) + batch_shape[1:])
elif batch_shape[0] == batch_n:
return
else:
raise ValueError()
def get_sample(self, reparam=False, sample_shape=torch.Size()):
"""Get a sample_shape shaped sample from :attr:`dist`.
Parameters
----------
reparam : :obj:`bool`, defaults to True.
Choose where to sample using re-parameterization trick.
sample_shape : :obj:`tuple` or :obj:`torch.Size`, defaults to torch.Size().
Set the shape of a generated sample.
Returns
-------
samples_dict : dict
Generated sample formatted by :obj:`dict`.
"""
if reparam:
try:
_samples = self.dist.rsample(sample_shape=sample_shape)
except NotImplementedError():
raise ValueError("You cannot use the re-parameterization trick for this distribution.")
else:
_samples = self.dist.sample(sample_shape=sample_shape)
samples_dict = {self._var[0]: _samples}
return samples_dict
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
_x_dict = get_dict_values(x_dict, self._cond_var, return_dict=True)
self.set_dist(_x_dict, sampling=False)
x_targets = get_dict_values(x_dict, self._var)
log_prob = self.dist.log_prob(*x_targets)
if sum_features:
log_prob = sum_samples(log_prob)
return log_prob
def get_params(self, params_dict={}):
params_dict, vars_dict = replace_dict_keys_split(params_dict, self.replace_params_dict)
output_dict = self.forward(**vars_dict)
output_dict.update(params_dict)
# append constant parameters to output_dict
constant_params_dict = get_dict_values(dict(self.named_buffers()), self.params_keys, return_dict=True)
output_dict.update(constant_params_dict)
return output_dict
def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
_x_dict = get_dict_values(x_dict, self._cond_var, return_dict=True)
self.set_dist(_x_dict, sampling=False)
entropy = self.dist.entropy()
if sum_features:
entropy = sum_samples(entropy)
return entropy
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False):
# check whether the input is valid or convert it to valid dictionary.
x_dict = self._check_input(x_dict)
input_dict = {}
# conditioned
if len(self.input_var) != 0:
input_dict.update(get_dict_values(x_dict, self.input_var, return_dict=True))
self.set_dist(input_dict, batch_n=batch_n)
output_dict = self.get_sample(reparam=reparam,
sample_shape=sample_shape)
if return_all:
x_dict.update(output_dict)
return x_dict
return output_dict
def sample_mean(self, x_dict={}):
self.set_dist(x_dict)
return self.dist.mean
def sample_variance(self, x_dict={}):
    """Return the variance of the distribution parameterized by ``x_dict``."""
    self.set_dist(x_dict)
    dist = self.dist
    return dist.variance
def forward(self, **kwargs):
    """Identity mapping from inputs to distribution parameters.

    Subclasses with learnable parameters override this to compute the
    parameters (e.g. ``loc``/``scale``) from the conditioning variables.
    """
    return kwargs
class MultiplyDistribution(Distribution):
    """Multiply by given distributions, e.g, :math:`p(x,y|z) = p(x|z,y)p(y|z)`.
    In this class, it is checked if two distributions can be multiplied.

    p(x|z)p(z|y) -> Valid
    p(x|z)p(y|z) -> Valid
    p(x|z)p(y|a) -> Valid
    p(x|z)p(z|x) -> Invalid (recursive)
    p(x|z)p(x|y) -> Invalid (conflict)

    Examples
    --------
    >>> a = DistributionBase(var=["x"], cond_var=["z"])
    >>> b = DistributionBase(var=["z"], cond_var=["y"])
    >>> p_multi = MultiplyDistribution(a, b)
    >>> print(p_multi)
    Distribution:
      p(x,z|y) = p(x|z)p(z|y)
    Network architecture:
      DistributionBase(
        name=p, distribution_name=,
        var=['z'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([])
      )
      DistributionBase(
        name=p, distribution_name=,
        var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
    >>> b = DistributionBase(var=["y"], cond_var=["z"])
    >>> p_multi = MultiplyDistribution(a, b)
    >>> print(p_multi)
    Distribution:
      p(x,y|z) = p(x|z)p(y|z)
    Network architecture:
      DistributionBase(
        name=p, distribution_name=,
        var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
      DistributionBase(
        name=p, distribution_name=,
        var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
    >>> b = DistributionBase(var=["y"], cond_var=["a"])
    >>> p_multi = MultiplyDistribution(a, b)
    >>> print(p_multi)
    Distribution:
      p(x,y|z,a) = p(x|z)p(y|a)
    Network architecture:
      DistributionBase(
        name=p, distribution_name=,
        var=['y'], cond_var=['a'], input_var=['a'], features_shape=torch.Size([])
      )
      DistributionBase(
        name=p, distribution_name=,
        var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
    """

    def __init__(self, a, b):
        """
        Parameters
        ----------
        a : pixyz.Distribution
            Distribution.
        b : pixyz.Distribution
            Distribution.

        Raises
        ------
        ValueError
            If either input is not a Distribution, or if the two
            distributions are recursive (p(x|z)p(z|x)) or conflicting
            (p(x|z)p(x|y)).
        """
        if not (isinstance(a, Distribution) and isinstance(b, Distribution)):
            raise ValueError("Given inputs should be `pixyz.Distribution`, got {} and {}.".format(type(a), type(b)))

        # Check parent-child relationship between two distributions.
        # If inherited variables (`_inh_var`) exist (e.g. c in p(e|c)p(c|a,b)),
        # then p(e|c) is a child and p(c|a,b) is a parent, otherwise it is opposite.
        _vars_a_b = a.cond_var + b.var
        _vars_b_a = b.cond_var + a.var
        _inh_var_a_b = [var for var in set(_vars_a_b) if _vars_a_b.count(var) > 1]
        _inh_var_b_a = [var for var in set(_vars_b_a) if _vars_b_a.count(var) > 1]

        if len(_inh_var_a_b) > 0:
            _child = a
            _parent = b
            _inh_var = _inh_var_a_b
        elif len(_inh_var_b_a) > 0:
            _child = b
            _parent = a
            _inh_var = _inh_var_b_a
        else:
            # No shared variable: the two factors are independent given their
            # own conditionals; the parent/child roles are then arbitrary.
            _child = a
            _parent = b
            _inh_var = []

        # Check if variables of two distributions are "recursive" (e.g. p(x|z)p(z|x)).
        _check_recursive_vars = _child.var + _parent.cond_var
        if len(_check_recursive_vars) != len(set(_check_recursive_vars)):
            raise ValueError("Variables of two distributions, {} and {}, are recursive.".format(_child.prob_text,
                                                                                                _parent.prob_text))

        # Set variables.
        _var = _child.var + _parent.var
        if len(_var) != len(set(_var)):  # e.g. p(x|z)p(x|y)
            raise ValueError("Variables of two distributions, {} and {}, are conflicted.".format(_child.prob_text,
                                                                                                 _parent.prob_text))

        # Set conditional variables (order-preserving de-duplication).
        _cond_var = _child.cond_var + _parent.cond_var
        _cond_var = sorted(set(_cond_var), key=_cond_var.index)

        # Delete inherited variables from the conditional variables: they are
        # generated internally by the parent, not supplied by the caller.
        _cond_var = [var for var in _cond_var if var not in _inh_var]

        super().__init__(cond_var=_cond_var, var=_var)
        self._parent = _parent
        self._child = _child

        # Set input_var (it might be different from cond_var if either a or b
        # contains data distributions).
        _input_var = [var for var in self._child.input_var if var not in _inh_var]
        _input_var += self._parent.input_var
        self._input_var = sorted(set(_input_var), key=_input_var.index)

    @property
    def input_var(self):
        return self._input_var

    @property
    def prob_factorized_text(self):
        return self._child.prob_factorized_text + self._parent.prob_factorized_text

    def sample(self, x_dict={}, batch_n=None, return_all=True, reparam=False, **kwargs):
        """Sample ancestrally: first the parent factor, then the child
        conditioned on the parent's output."""
        # NOTE(review): `x_dict` may be mutated in-place by the nested
        # `sample` calls; callers passing a dict they reuse should copy it
        # first — TODO confirm against `_check_input` semantics.
        parents_x_dict = x_dict
        child_x_dict = self._parent.sample(x_dict=parents_x_dict, batch_n=batch_n,
                                           return_all=True, reparam=reparam)
        # sample from the child distribution
        output_dict = self._child.sample(x_dict=child_x_dict, batch_n=batch_n,
                                         return_all=True, reparam=reparam)

        if return_all is False:
            # Keep only this joint distribution's own variables.
            output_dict = get_dict_values(output_dict, self._var, return_dict=True)
        return output_dict

    def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
        """Return the sum of the log-densities of the parent and child factors.

        Raises
        ------
        ValueError
            If ``sum_features`` is False and the two factors' log-density
            tensors have different sizes (they cannot be added elementwise).
        """
        parent_log_prob = self._parent.get_log_prob(x_dict, sum_features=sum_features, feature_dims=feature_dims)
        child_log_prob = self._child.get_log_prob(x_dict, sum_features=sum_features, feature_dims=feature_dims)

        if sum_features:
            return parent_log_prob + child_log_prob

        if parent_log_prob.size() == child_log_prob.size():
            return parent_log_prob + child_log_prob

        # Fixed: the message previously referenced a nonexistent `sum_dim`
        # keyword; the actual parameter is `sum_features`.
        raise ValueError("Two PDFs, {} and {}, have different sizes,"
                         " so you must set sum_features=True.".format(self._parent.prob_text, self._child.prob_text))

    def __repr__(self):
        return self._parent.__repr__() + "\n" + self._child.__repr__()
class ReplaceVarDistribution(Distribution):
    """Replace names of variables in Distribution.

    Examples
    --------
    >>> p = DistributionBase(var=["x"], cond_var=["z"])
    >>> print(p)
    Distribution:
      p(x|z)
    Network architecture:
      DistributionBase(
        name=p, distribution_name=,
        var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
    >>> replace_dict = {'x': 'y'}
    >>> p_repl = ReplaceVarDistribution(p, replace_dict)
    >>> print(p_repl)
    Distribution:
      p(y|z)
    Network architecture:
      ReplaceVarDistribution(
        name=p, distribution_name=,
        var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
        (p): DistributionBase(
          name=p, distribution_name=,
          var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
        )
      )
    """

    def __init__(self, p, replace_dict):
        """
        Parameters
        ----------
        p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.MultiplyDistribution`)
            Distribution.
        replace_dict : dict
            Mapping from original variable names to their replacements.

        Raises
        ------
        ValueError
            If ``p`` is not a plain Distribution, or if ``replace_dict``
            refers to variables that ``p`` does not have.
        """
        if not isinstance(p, Distribution):
            raise ValueError("Given input should be `pixyz.Distribution`, got {}.".format(type(p)))

        if isinstance(p, MultiplyDistribution):
            raise ValueError("`pixyz.MultiplyDistribution` is not supported for now.")

        if isinstance(p, MarginalizeVarDistribution):
            raise ValueError("`pixyz.MarginalizeVarDistribution` is not supported for now.")

        _cond_var = deepcopy(p.cond_var)
        _var = deepcopy(p.var)
        all_vars = _cond_var + _var

        if not (set(replace_dict.keys()) <= set(all_vars)):
            # Fixed: previously raised a bare ValueError with no message.
            raise ValueError("Keys of `replace_dict` must be variables of the given distribution: "
                            "got {}, but the distribution only has {}.".format(list(replace_dict.keys()), all_vars))

        # Inverse mappings: replaced name -> original name. The cond-var-only
        # inverse is used to translate caller inputs back before delegating.
        _replace_inv_cond_var_dict = {replace_dict[var]: var for var in _cond_var if var in replace_dict.keys()}
        _replace_inv_dict = {value: key for key, value in replace_dict.items()}

        self._replace_inv_cond_var_dict = _replace_inv_cond_var_dict
        self._replace_inv_dict = _replace_inv_dict
        self._replace_dict = replace_dict

        _cond_var = [replace_dict[var] if var in replace_dict.keys() else var for var in _cond_var]
        _var = [replace_dict[var] if var in replace_dict.keys() else var for var in _var]
        super().__init__(cond_var=_cond_var, var=_var, name=p.name, features_shape=p.features_shape)

        self.p = p
        _input_var = [replace_dict[var] if var in replace_dict.keys() else var for var in p.input_var]
        self._input_var = _input_var

    def forward(self, *args, **kwargs):
        return self.p.forward(*args, **kwargs)

    def get_params(self, params_dict={}):
        # Translate renamed conditional variables back to the wrapped
        # distribution's original names before delegating.
        params_dict = replace_dict_keys(params_dict, self._replace_inv_cond_var_dict)
        return self.p.get_params(params_dict)

    def set_dist(self, x_dict={}, sampling=False, batch_n=None, **kwargs):
        x_dict = replace_dict_keys(x_dict, self._replace_inv_cond_var_dict)
        return self.p.set_dist(x_dict=x_dict, sampling=sampling, batch_n=batch_n, **kwargs)

    def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False, **kwargs):
        """Sample from the wrapped distribution, renaming in both directions."""
        input_dict = get_dict_values(x_dict, self.cond_var, return_dict=True)
        replaced_input_dict = replace_dict_keys(input_dict, self._replace_inv_cond_var_dict)

        output_dict = self.p.sample(replaced_input_dict, batch_n=batch_n, sample_shape=sample_shape,
                                    return_all=False, reparam=reparam, **kwargs)
        # Rename the wrapped distribution's outputs to the replaced names.
        output_dict = replace_dict_keys(output_dict, self._replace_dict)

        x_dict.update(output_dict)
        return x_dict

    def get_log_prob(self, x_dict, **kwargs):
        input_dict = get_dict_values(x_dict, self.cond_var + self.var, return_dict=True)
        input_dict = replace_dict_keys(input_dict, self._replace_inv_dict)
        return self.p.get_log_prob(input_dict, **kwargs)

    def sample_mean(self, x_dict={}):
        input_dict = get_dict_values(x_dict, self.cond_var, return_dict=True)
        input_dict = replace_dict_keys(input_dict, self._replace_inv_cond_var_dict)
        return self.p.sample_mean(input_dict)

    def sample_variance(self, x_dict={}):
        input_dict = get_dict_values(x_dict, self.cond_var, return_dict=True)
        input_dict = replace_dict_keys(input_dict, self._replace_inv_cond_var_dict)
        return self.p.sample_variance(input_dict)

    @property
    def input_var(self):
        return self._input_var

    @property
    def distribution_name(self):
        return self.p.distribution_name

    def __getattr__(self, item):
        # Fall back to the wrapped distribution for anything this wrapper
        # does not define itself (nn.Module attributes are tried first).
        try:
            return super().__getattr__(item)
        except AttributeError:
            return self.p.__getattribute__(item)
class MarginalizeVarDistribution(Distribution):
    r"""Marginalize variables in Distribution.

    .. math::
        p(x) = \int p(x,z) dz

    Examples
    --------
    >>> a = DistributionBase(var=["x"], cond_var=["z"])
    >>> b = DistributionBase(var=["y"], cond_var=["z"])
    >>> p_multi = a * b
    >>> print(p_multi)
    Distribution:
      p(x,y|z) = p(x|z)p(y|z)
    Network architecture:
      DistributionBase(
        name=p, distribution_name=,
        var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
      DistributionBase(
        name=p, distribution_name=,
        var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
    >>> p_marg = MarginalizeVarDistribution(p_multi, ["y"])
    >>> print(p_marg)
    Distribution:
      p(x|z) = \int p(x|z)p(y|z)dy
    Network architecture:
      DistributionBase(
        name=p, distribution_name=,
        var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
      DistributionBase(
        name=p, distribution_name=,
        var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
      )
    """

    def __init__(self, p, marginalize_list):
        """
        Parameters
        ----------
        p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.DistributionBase`)
            Distribution.
        marginalize_list : list
            Variables to marginalize.

        Raises
        ------
        ValueError
            If ``p`` is not a composite distribution, if ``marginalize_list``
            is empty, is not a proper subset of ``p``'s variables, or
            overlaps ``p``'s conditional variables.
        """
        marginalize_list = tolist(marginalize_list)

        if not isinstance(p, Distribution):
            raise ValueError("Given input must be `pixyz.distributions.Distribution`, got {}.".format(type(p)))

        if isinstance(p, DistributionBase):
            raise ValueError("`pixyz.distributions.DistributionBase` cannot be marginalized its variables.")

        _var = deepcopy(p.var)
        _cond_var = deepcopy(p.cond_var)

        if not((set(marginalize_list)) < set(_var)):
            # Fixed: previously raised a bare ValueError with no message.
            # Proper-subset check: at least one variable must remain.
            raise ValueError("`marginalize_list` must be a proper subset of the variables of `p`: "
                            "got {}, while `p` has variables {}.".format(marginalize_list, _var))

        if not((set(marginalize_list)).isdisjoint(set(_cond_var))):
            # Fixed: previously raised a bare ValueError with no message.
            raise ValueError("`marginalize_list` must not contain conditional variables of `p`: "
                            "got {}, while `p` is conditioned on {}.".format(marginalize_list, _cond_var))

        if len(marginalize_list) == 0:
            raise ValueError("Length of `marginalize_list` must be at least 1, got 0.")

        _var = [var for var in _var if var not in marginalize_list]

        super().__init__(cond_var=_cond_var, var=_var, name=p.name, features_shape=p.features_shape)
        self.p = p
        self._marginalize_list = marginalize_list

    def forward(self, *args, **kwargs):
        return self.p.forward(*args, **kwargs)

    def get_params(self, params_dict={}):
        return self.p.get_params(params_dict)

    def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False, **kwargs):
        """Sample the full joint from ``p`` and drop the marginalized variables."""
        output_dict = self.p.sample(x_dict=x_dict, batch_n=batch_n, sample_shape=sample_shape, return_all=return_all,
                                    reparam=reparam, **kwargs)
        # Marginalization by ancestral sampling: simply discard the samples
        # of the integrated-out variables.
        output_dict = delete_dict_values(output_dict, self._marginalize_list)

        return output_dict

    def sample_mean(self, x_dict={}):
        return self.p.sample_mean(x_dict)

    def sample_variance(self, x_dict={}):
        return self.p.sample_variance(x_dict)

    @property
    def input_var(self):
        return self.p.input_var

    @property
    def distribution_name(self):
        return self.p.distribution_name

    @property
    def prob_factorized_text(self):
        integral_symbol = len(self._marginalize_list) * "\\int "
        integral_variables = ["d" + str(var) for var in self._marginalize_list]
        integral_variables = "".join(integral_variables)

        return "{}{}{}".format(integral_symbol, self.p.prob_factorized_text, integral_variables)

    def __repr__(self):
        return self.p.__repr__()

    def __getattr__(self, item):
        # Fall back to the wrapped distribution for anything this wrapper
        # does not define itself (nn.Module attributes are tried first).
        try:
            return super().__getattr__(item)
        except AttributeError:
            return self.p.__getattribute__(item)
This name is displayed in :attr:`prob_text` and :attr:`prob_factorized_text`. features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size()) Shape of dimensions (features) of this distribution. str: Name of this distribution class. str: Name of this distribution displayed in :obj:`prob_text` and :obj:`prob_factorized_text`. list: Variables of this distribution. list: Conditional variables of this distribution. list: Input variables of this distribution. Normally, it has same values as :attr:`cond_var`. str: Return a formula of the (joint) probability distribution. str: Return a formula of the factorized probability distribution. str: Return a formula of the factorized and the (joint) probability distributions. torch.Size or list: Shape of features of this distribution. Check the type of given input. If the input type is :obj:`dict`, this method checks whether the input keys contains the :attr:`var` list. In case that its type is :obj:`list` or :obj:`tensor`, it returns the output formatted in :obj:`dict`. Parameters ---------- input : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict` Input variables. var : :obj:`list` or :obj:`NoneType`, defaults to None Variables to check if given input contains them. This is set to None by default. Returns ------- input_dict : dict Variables checked in this method. Raises ------ ValueError Raises `ValueError` if the type of input is neither :obj:`torch.Tensor`, :obj:`list`, nor :obj:`dict. # TODO: we need to check if all the elements contained in this list are torch.Tensor. This method aims to get parameters of this distributions from constant parameters set in initialization and outputs of DNNs. Parameters ---------- params_dict : :obj:`dict`, defaults to {} Input parameters. Returns ------- output_dict : dict Output parameters. Examples -------- >>> from pixyz.distributions import Normal >>> dist_1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], ... 
features_shape=[1]) >>> print(dist_1) Distribution: p(x) Network architecture: Normal( name=p, distribution_name=Normal, var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([1]) (loc): torch.Size([1, 1]) (scale): torch.Size([1, 1]) ) >>> dist_1.get_params() {'loc': tensor([[0.]]), 'scale': tensor([[1.]])} >>> dist_2 = Normal(loc=torch.tensor(0.), scale="z", cond_var=["z"], var=["x"]) >>> print(dist_2) Distribution: p(x|z) Network architecture: Normal( name=p, distribution_name=Normal, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) (loc): torch.Size([1]) ) >>> dist_2.get_params({"z": torch.tensor(1.)}) {'scale': tensor(1.), 'loc': tensor([0.])} Sample variables of this distribution. If :attr:`cond_var` is not empty, you should set inputs as :obj:`dict`. Parameters ---------- x_dict : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`, defaults to {} Input variables. sample_shape : :obj:`list` or :obj:`NoneType`, defaults to torch.Size() Shape of generating samples. batch_n : :obj:`int`, defaults to None. Set batch size of parameters. return_all : :obj:`bool`, defaults to True Choose whether the output contains input variables. reparam : :obj:`bool`, defaults to False. Choose whether we sample variables with re-parameterized trick. Returns ------- output : dict Samples of this distribution. Examples -------- >>> from pixyz.distributions import Normal >>> # Marginal distribution >>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], ... 
features_shape=[10, 2]) >>> print(p) Distribution: p(x) Network architecture: Normal( name=p, distribution_name=Normal, var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([10, 2]) (loc): torch.Size([1, 10, 2]) (scale): torch.Size([1, 10, 2]) ) >>> p.sample()["x"].shape # (batch_n=1, features_shape) torch.Size([1, 10, 2]) >>> p.sample(batch_n=20)["x"].shape # (batch_n, features_shape) torch.Size([20, 10, 2]) >>> p.sample(batch_n=20, sample_shape=[40, 30])["x"].shape # (sample_shape, batch_n, features_shape) torch.Size([40, 30, 20, 10, 2]) >>> # Conditional distribution >>> p = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"], ... features_shape=[10]) >>> print(p) Distribution: p(x|y) Network architecture: Normal( name=p, distribution_name=Normal, var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([10]) (scale): torch.Size([1, 10]) ) >>> sample_y = torch.randn(1, 10) # Psuedo data >>> sample_a = torch.randn(1, 10) # Psuedo data >>> sample = p.sample({"y": sample_y}) >>> print(sample) # input_var + var # doctest: +SKIP {'y': tensor([[-0.5182, 0.3484, 0.9042, 0.1914, 0.6905, -1.0859, -0.4433, -0.0255, 0.8198, 0.4571]]), 'x': tensor([[-0.7205, -1.3996, 0.5528, -0.3059, 0.5384, -1.4976, -0.1480, 0.0841,0.3321, 0.5561]])} >>> sample = p.sample({"y": sample_y, "a": sample_a}) # Redundant input ("a") >>> print(sample) # input_var + var + "a" (redundant input) # doctest: +SKIP {'y': tensor([[ 1.3582, -1.1151, -0.8111, 1.0630, 1.1633, 0.3855, 2.6324, -0.9357, -0.8649, -0.6015]]), 'a': tensor([[-0.1874, 1.7958, -1.4084, -2.5646, 1.0868, -0.7523, -0.0852, -2.4222, -0.3914, -0.9755]]), 'x': tensor([[-0.3272, -0.5222, -1.3659, 1.8386, 2.3204, 0.3686, 0.6311, -1.1208, 0.3656, -0.6683]])} Return the mean of the distribution. Parameters ---------- x_dict : :obj:`dict`, defaults to {} Parameters of this distribution. 
Examples -------- >>> import torch >>> from pixyz.distributions import Normal >>> # Marginal distribution >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], ... features_shape=[10], name="p1") >>> mean = p1.sample_mean() >>> print(mean) tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]) >>> # Conditional distribution >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"], ... features_shape=[10], name="p2") >>> sample_y = torch.randn(1, 10) # Psuedo data >>> mean = p2.sample_mean({"y": sample_y}) >>> print(mean) # doctest: +SKIP tensor([[-0.2189, -1.0310, -0.1917, -0.3085, 1.5190, -0.9037, 1.2559, 0.1410, 1.2810, -0.6681]]) Return the variance of the distribution. Parameters ---------- x_dict : :obj:`dict`, defaults to {} Parameters of this distribution. Examples -------- >>> import torch >>> from pixyz.distributions import Normal >>> # Marginal distribution >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], ... features_shape=[10], name="p1") >>> var = p1.sample_variance() >>> print(var) tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]) >>> # Conditional distribution >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"], ... features_shape=[10], name="p2") >>> sample_y = torch.randn(1, 10) # Psuedo data >>> var = p2.sample_variance({"y": sample_y}) >>> print(var) # doctest: +SKIP tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]) Giving variables, this method returns values of log-pdf. Parameters ---------- x_dict : dict Input variables. sum_features : :obj:`bool`, defaults to True Whether the output is summed across some dimensions which are specified by `feature_dims`. feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None Set dimensions to sum across the output. Returns ------- log_prob : torch.Tensor Values of log-probability density/mass function. 
Examples -------- >>> import torch >>> from pixyz.distributions import Normal >>> # Marginal distribution >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], ... features_shape=[10], name="p1") >>> sample_x = torch.randn(1, 10) # Psuedo data >>> log_prob = p1.log_prob({"x": sample_x}) >>> print(log_prob) # doctest: +SKIP tensor([-16.1153]) >>> # Conditional distribution >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"], ... features_shape=[10], name="p2") >>> sample_y = torch.randn(1, 10) # Psuedo data >>> log_prob = p2.log_prob({"x": sample_x, "y": sample_y}) >>> print(log_prob) # doctest: +SKIP tensor([-21.5251]) Giving variables, this method returns values of entropy. Parameters ---------- x_dict : dict, defaults to {} Input variables. sum_features : :obj:`bool`, defaults to True Whether the output is summed across some dimensions which are specified by :attr:`feature_dims`. feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None Set dimensions to sum across the output. Returns ------- entropy : torch.Tensor Values of entropy. Examples -------- >>> import torch >>> from pixyz.distributions import Normal >>> # Marginal distribution >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], ... features_shape=[10], name="p1") >>> entropy = p1.get_entropy() >>> print(entropy) tensor([14.1894]) >>> # Conditional distribution >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"], ... features_shape=[10], name="p2") >>> sample_y = torch.randn(1, 10) # Psuedo data >>> entropy = p2.get_entropy({"y": sample_y}) >>> print(entropy) tensor([14.1894]) Return an instance of :class:`pixyz.losses.LogProb`. Parameters ---------- sum_features : :obj:`bool`, defaults to True Whether the output is summed across some axes (dimensions) which are specified by :attr:`feature_dims`. feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None Set axes to sum across the output. 
Returns ------- pixyz.losses.LogProb An instance of :class:`pixyz.losses.LogProb` Examples -------- >>> import torch >>> from pixyz.distributions import Normal >>> # Marginal distribution >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], ... features_shape=[10], name="p1") >>> sample_x = torch.randn(1, 10) # Psuedo data >>> log_prob = p1.log_prob().eval({"x": sample_x}) >>> print(log_prob) # doctest: +SKIP tensor([-16.1153]) >>> # Conditional distribution >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"], ... features_shape=[10], name="p2") >>> sample_y = torch.randn(1, 10) # Psuedo data >>> log_prob = p2.log_prob().eval({"x": sample_x, "y": sample_y}) >>> print(log_prob) # doctest: +SKIP tensor([-21.5251]) Return an instance of :class:`pixyz.losses.LogProb`. Parameters ---------- sum_features : :obj:`bool`, defaults to True Choose whether the output is summed across some axes (dimensions) which are specified by :attr:`feature_dims`. feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None Set dimensions to sum across the output. (Note: this parameter is not used for now.) Returns ------- pixyz.losses.Prob An instance of :class:`pixyz.losses.Prob` Examples -------- >>> import torch >>> from pixyz.distributions import Normal >>> # Marginal distribution >>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], ... features_shape=[10], name="p1") >>> sample_x = torch.randn(1, 10) # Psuedo data >>> prob = p1.prob().eval({"x": sample_x}) >>> print(prob) # doctest: +SKIP tensor([4.0933e-07]) >>> # Conditional distribution >>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"], ... features_shape=[10], name="p2") >>> sample_y = torch.randn(1, 10) # Psuedo data >>> prob = p2.prob().eval({"x": sample_x, "y": sample_y}) >>> print(prob) # doctest: +SKIP tensor([2.9628e-09]) When this class is inherited by DNNs, this method should be overrided. 
Return an instance of :class:`pixyz.distributions.ReplaceVarDistribution`. Parameters ---------- replace_dict : dict Dictionary. Returns ------- pixyz.distributions.ReplaceVarDistribution An instance of :class:`pixyz.distributions.ReplaceVarDistribution` Return an instance of :class:`pixyz.distributions.MarginalizeVarDistribution`. Parameters ---------- marginalize_list : :obj:`list` or other Variables to marginalize. Returns ------- pixyz.distributions.MarginalizeVarDistribution An instance of :class:`pixyz.distributions.MarginalizeVarDistribution` # Distribution # Network architecture (`repr`) # parameters # add buffers to repr Distribution class with PyTorch. In Pixyz, all distributions are required to inherit this class. Format constant parameters of this distribution as buffers. Parameters ---------- params_dict : dict Constant parameters of this distribution set at initialization. If the values of these dictionaries contain parameters which are named as strings, which means that these parameters are set as `variables`, the correspondences between these values and the true name of these parameters are stored as :obj:`dict` (:attr:`replace_params_dict`). # scalar list: Return the list of parameter names for this distribution. Return the class of PyTorch distribution. Return the instance of PyTorch distribution. Set :attr:`dist` as PyTorch distributions given parameters. This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set. Parameters ---------- x_dict : :obj:`dict`, defaults to {}. Parameters of this distribution. sampling : :obj:`bool`, defaults to False. Choose whether to use relaxed_* in PyTorch distribution. batch_n : :obj:`int`, defaults to None. Set batch size of parameters. **kwargs Arbitrary keyword arguments. Returns ------- # expand batch_n Get a sample_shape shaped sample from :attr:`dist`. Parameters ---------- reparam : :obj:`bool`, defaults to True. Choose where to sample using re-parameterization trick. 
sample_shape : :obj:`tuple` or :obj:`torch.Size`, defaults to torch.Size(). Set the shape of a generated sample. Returns ------- samples_dict : dict Generated sample formatted by :obj:`dict`. # append constant parameters to output_dict # check whether the input is valid or convert it to valid dictionary. # conditioned Multiply by given distributions, e.g, :math:`p(x,y|z) = p(x|z,y)p(y|z)`. In this class, it is checked if two distributions can be multiplied. p(x|z)p(z|y) -> Valid p(x|z)p(y|z) -> Valid p(x|z)p(y|a) -> Valid p(x|z)p(z|x) -> Invalid (recursive) p(x|z)p(x|y) -> Invalid (conflict) Examples -------- >>> a = DistributionBase(var=["x"], cond_var=["z"]) >>> b = DistributionBase(var=["z"], cond_var=["y"]) >>> p_multi = MultiplyDistribution(a, b) >>> print(p_multi) Distribution: p(x,z|y) = p(x|z)p(z|y) Network architecture: DistributionBase( name=p, distribution_name=, var=['z'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([]) ) DistributionBase( name=p, distribution_name=, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) >>> b = DistributionBase(var=["y"], cond_var=["z"]) >>> p_multi = MultiplyDistribution(a, b) >>> print(p_multi) Distribution: p(x,y|z) = p(x|z)p(y|z) Network architecture: DistributionBase( name=p, distribution_name=, var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) DistributionBase( name=p, distribution_name=, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) >>> b = DistributionBase(var=["y"], cond_var=["a"]) >>> p_multi = MultiplyDistribution(a, b) >>> print(p_multi) Distribution: p(x,y|z,a) = p(x|z)p(y|a) Network architecture: DistributionBase( name=p, distribution_name=, var=['y'], cond_var=['a'], input_var=['a'], features_shape=torch.Size([]) ) DistributionBase( name=p, distribution_name=, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) Parameters ---------- a : pixyz.Distribution Distribution. 
b : pixyz.Distribution Distribution. # Check parent-child relationship between two distributions. # If inherited variables (`_inh_var`) are exist (e.g. c in p(e|c)p(c|a,b)), # then p(e|c) is a child and p(c|a,b) is a parent, otherwise it is opposite. # Check if variables of two distributions are "recursive" (e.g. p(x|z)p(z|x)). # Set variables. # e.g. p(x|z)p(x|y) # Set conditional variables. # Delete inh_var in conditional variables. # Set input_var (it might be different from cond_var if either a and b contain data distributions.) # sample from the parent distribution # sample from the child distribution Replace names of variables in Distribution. Examples -------- >>> p = DistributionBase(var=["x"], cond_var=["z"]) >>> print(p) Distribution: p(x|z) Network architecture: DistributionBase( name=p, distribution_name=, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) >>> replace_dict = {'x': 'y'} >>> p_repl = ReplaceVarDistribution(p, replace_dict) >>> print(p_repl) Distribution: p(y|z) Network architecture: ReplaceVarDistribution( name=p, distribution_name=, var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) (p): DistributionBase( name=p, distribution_name=, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) ) Parameters ---------- p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.MultiplyDistribution`) Distribution. replace_dict : dict Dictionary. Marginalize variables in Distribution. .. 
math:: p(x) = \int p(x,z) dz Examples -------- >>> a = DistributionBase(var=["x"], cond_var=["z"]) >>> b = DistributionBase(var=["y"], cond_var=["z"]) >>> p_multi = a * b >>> print(p_multi) Distribution: p(x,y|z) = p(x|z)p(y|z) Network architecture: DistributionBase( name=p, distribution_name=, var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) DistributionBase( name=p, distribution_name=, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) >>> p_marg = MarginalizeVarDistribution(p_multi, ["y"]) >>> print(p_marg) Distribution: p(x|z) = \int p(x|z)p(y|z)dy Network architecture: DistributionBase( name=p, distribution_name=, var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) DistributionBase( name=p, distribution_name=, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) ) Parameters ---------- p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.DistributionBase`) Distribution. marginalize_list : list Variables to marginalize. | 2.592123 | 3 |
htp/aux/alembic/versions/16411ed1210c_create_candles_table.py | kirkjules/machine-learned-timeseries | 1 | 6620022 | <filename>htp/aux/alembic/versions/16411ed1210c_create_candles_table.py
"""create Candles table
Revision ID: 16411ed1210c
Revises: 27047c3aa544
Create Date: 2020-01-24 18:07:40.159835
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
# revision identifiers, used by Alembic.
revision = '16411ed1210c'
down_revision = '27047c3aa544'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `candles` table: one OHLC row per timestamp, linked to a
    getTickerTask batch via `batch_id`."""
    op.create_table(
        'candles',
        sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
        # ties each candle to the download batch that produced it
        sa.Column(
            'batch_id', UUID(as_uuid=True), sa.ForeignKey("getTickerTask.id")),
        sa.Column('timestamp', sa.DateTime()),
        # prices kept to 6 significant digits (FX-style quotes)
        sa.Column('open', sa.Float(precision=6)),
        sa.Column('high', sa.Float(precision=6)),
        sa.Column('low', sa.Float(precision=6)),
        sa.Column('close', sa.Float(precision=6)))
def downgrade():
    """Drop the `candles` table created by upgrade()."""
    op.drop_table('candles')
| <filename>htp/aux/alembic/versions/16411ed1210c_create_candles_table.py
"""create Candles table
Revision ID: 16411ed1210c
Revises: 27047c3aa544
Create Date: 2020-01-24 18:07:40.159835
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
# revision identifiers, used by Alembic.
revision = '16411ed1210c'
down_revision = '27047c3aa544'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'candles',
sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
sa.Column(
'batch_id', UUID(as_uuid=True), sa.ForeignKey("getTickerTask.id")),
sa.Column('timestamp', sa.DateTime()),
sa.Column('open', sa.Float(precision=6)),
sa.Column('high', sa.Float(precision=6)),
sa.Column('low', sa.Float(precision=6)),
sa.Column('close', sa.Float(precision=6)))
def downgrade():
op.drop_table('candles')
| en | 0.397378 | create Candles table Revision ID: 16411ed1210c Revises: 27047c3aa544 Create Date: 2020-01-24 18:07:40.159835 # revision identifiers, used by Alembic. | 1.690818 | 2 |
bot/db.py | ne-bknn/CSSH | 1 | 6620023 | from abc import abstractmethod
import aioredis
class AbstractDB:
    """Abstract Key DB interface"""
    # NOTE(review): this class does not inherit from abc.ABC, so the
    # @abstractmethod decorators below are NOT enforced at instantiation
    # time; a subclass may omit methods (RedisDB, for example, implements
    # neither set_task nor get_tasks) without raising. Confirm whether
    # enforcement was intended before adding ABC, since it would make
    # RedisDB un-instantiable as written.
    @abstractmethod
    def __init__(self):
        pass
    @classmethod
    @abstractmethod
    async def create(cls, connection: str):
        """Async DB initializer"""
    @classmethod
    @abstractmethod
    async def create_tmp(cls, connection: str):
        """Async DB initializer without threadpool (for one-shot connections without access to global connection)"""
    @abstractmethod
    async def create_user(self, user_id: int, username: str):
        """Creates telegram_id-username connection"""
    @abstractmethod
    async def create_key(self, user_id: int, secret: str):
        """Records a secret for given telegram_id"""
    @abstractmethod
    async def is_registered(self, user_id: int):
        """Checks whether user is already registered"""
    @abstractmethod
    async def contains(self, username: str):
        """Checks whether given username is already taken"""
    @abstractmethod
    async def update_key(self, user_id: int, secret: str):
        """Updates a secret for given telegram_id"""
    @abstractmethod
    async def get_username(self, user_id: int):
        """Gets username by telegram_id"""
    @abstractmethod
    async def get_secret(self, user_id: int):
        """Gets publickey by telegram_id"""
    @abstractmethod
    async def set_task(self, user_id: int, task_name: str):
        """Set current task of a user"""
    @abstractmethod
    async def get_tasks(self):
        """Get all task names"""
    @abstractmethod
    async def close(self):
        """Closes connection to DB"""
class RedisDB(AbstractDB):
    """Redis-backed implementation of AbstractDB.

    NOTE(review): values read back from redis (get_username, get_secret,
    set members) come out as ``bytes``, not ``str`` -- get_images decodes
    explicitly, the other getters do not; confirm callers expect bytes.
    """
    def __init__(self):
        # the connection is established by the async factories below,
        # never in __init__ (it cannot await)
        self.conn = None
    @classmethod
    async def create(cls, connection: str):
        """Async Redis backed initializer"""
        # pooled connection for the long-lived global instance
        self = RedisDB()
        self.conn = await aioredis.create_redis_pool(connection)
        return self
    @classmethod
    async def create_tmp(cls, connection: str):
        # one-shot variant: a single connection instead of a pool
        self = RedisDB()
        self.conn = await aioredis.create_connection(connection)
        return self
    async def create_user(self, user_id: int, username: str):
        # forward mapping (id -> name), two membership sets for O(1)
        # existence checks, and the reverse mapping (name -> id)
        await self.conn.execute("set", f"user:{user_id}", username)
        await self.conn.execute("sadd", "telegram_ids", user_id)
        await self.conn.execute("sadd", "usernames", username)
        await self.conn.execute("set", f"username:{username}", str(user_id))
    async def create_key(self, user_id: int, secret: str):
        # store the user's secret under secrets:<telegram_id>
        await self.conn.execute("set", f"secrets:{user_id}", secret)
    async def is_registered(self, user_id: int):
        # True when the id is a member of the telegram_ids set
        return bool(await self.conn.execute("sismember", "telegram_ids", user_id))
    async def contains(self, username: str):
        # True when the username is already taken
        return bool(await self.conn.execute("sismember", "usernames", username))
    async def update_key(self, user_id: int, secret: str):
        # a redis SET overwrites, so updating is identical to creating
        await self.create_key(user_id, secret)
    async def get_username(self, user_id: int):
        # returns raw bytes, or None when the key does not exist
        return await self.conn.execute("get", f"user:{user_id}")
    async def get_secret(self, user_id: int):
        # returns raw bytes, or None when the key does not exist
        return await self.conn.execute("get", f"secrets:{user_id}")
    async def del_image(self, image_name: str):
        # remove an image name from the registry set (no-op if absent)
        await self.conn.execute("srem", "images_set", image_name)
    async def add_image(self, image_name: str):
        await self.conn.execute("sadd", "images_set", image_name)
    async def get_images(self):
        # decode set members from bytes to str before returning
        images = await self.conn.execute("smembers", "images_set")
        images = [image.decode() for image in images]
        return images
    async def contains_image(self, imagename: str):
        return bool(await self.conn.execute("sismember", "images_set", imagename))
    async def set_image(self, user_id: int, imagename: str):
        await self.conn.execute("set", f"images:{user_id}", imagename)
    async def close(self):
        """Closes connection to redis"""
        self.conn.close()
        await self.conn.wait_closed()
| from abc import abstractmethod
import aioredis
class AbstractDB:
"""Abstract Key DB interface"""
@abstractmethod
def __init__(self):
pass
@classmethod
@abstractmethod
async def create(cls, connection: str):
"""Async DB initializer"""
@classmethod
@abstractmethod
async def create_tmp(cls, connection: str):
"""Async DB initializer without threadpool (for one-shot connections without access to global connection"""
@abstractmethod
async def create_user(self, user_id: int, username: str):
"""Creates telegram_id-username connection"""
@abstractmethod
async def create_key(self, user_id: int, secret: str):
"""Records a secret for given telegram_id"""
@abstractmethod
async def is_registered(self, user_id: int):
"""Checks whether user is already registered"""
@abstractmethod
async def contains(self, username: str):
"""Checks whether given username is already taken"""
@abstractmethod
async def update_key(self, user_id: int, secret: str):
"""Updates a secret for given telegram_id"""
@abstractmethod
async def get_username(self, user_id: int):
"""Gets username by telegram_id"""
@abstractmethod
async def get_secret(self, user_id: int):
"""Gets publickey by telegram_id"""
@abstractmethod
async def set_task(self, user_id: int, task_name: str):
"""Set current task of a user"""
@abstractmethod
async def get_tasks(self):
"""Get all task names"""
@abstractmethod
async def close(self):
"""Closes connection to DB"""
class RedisDB(AbstractDB):
def __init__(self):
self.conn = None
@classmethod
async def create(cls, connection: str):
"""Async Redis backed initializer"""
self = RedisDB()
self.conn = await aioredis.create_redis_pool(connection)
return self
@classmethod
async def create_tmp(cls, connection: str):
self = RedisDB()
self.conn = await aioredis.create_connection(connection)
return self
async def create_user(self, user_id: int, username: str):
await self.conn.execute("set", f"user:{user_id}", username)
await self.conn.execute("sadd", "telegram_ids", user_id)
await self.conn.execute("sadd", "usernames", username)
await self.conn.execute("set", f"username:{username}", str(user_id))
async def create_key(self, user_id: int, secret: str):
await self.conn.execute("set", f"secrets:{user_id}", secret)
async def is_registered(self, user_id: int):
return bool(await self.conn.execute("sismember", "telegram_ids", user_id))
async def contains(self, username: str):
return bool(await self.conn.execute("sismember", "usernames", username))
async def update_key(self, user_id: int, secret: str):
await self.create_key(user_id, secret)
async def get_username(self, user_id: int):
return await self.conn.execute("get", f"user:{user_id}")
async def get_secret(self, user_id: int):
return await self.conn.execute("get", f"secrets:{user_id}")
async def del_image(self, image_name: str):
await self.conn.execute("srem", "images_set", image_name)
async def add_image(self, image_name: str):
await self.conn.execute("sadd", "images_set", image_name)
async def get_images(self):
images = await self.conn.execute("smembers", "images_set")
images = [image.decode() for image in images]
return images
async def contains_image(self, imagename: str):
return bool(await self.conn.execute("sismember", "images_set", imagename))
async def set_image(self, user_id: int, imagename: str):
await self.conn.execute("set", f"images:{user_id}", imagename)
async def close(self):
"""Closes connection to redis"""
self.conn.close()
await self.conn.wait_closed()
| en | 0.735606 | Abstract Key DB interface Async DB initializer Async DB initializer without threadpool (for one-shot connections without access to global connection Creates telegram_id-username connection Records a secret for given telegram_id Checks whether user is already registered Checks whether given username is already taken Updates a secret for given telegram_id Gets username by telegram_id Gets publickey by telegram_id Set current task of a user Get all task names Closes connection to DB Async Redis backed initializer Closes connection to redis | 2.949494 | 3 |
Leetcode/Medium/Find_the_Duplicate_Number.py | drkndl/Coding-Practice | 0 | 6620024 | # Runtime: 3492 ms, faster than 5.01% of Python3 online submissions for Find the Duplicate Number.
# Memory Usage: 16.7 MB, less than 26.91% of Python3 online submissions for Find the Duplicate Number.
class Solution:
    def findDuplicate(self, nums: List[int]) -> int:
        """Return the repeated value in nums.

        The original called nums.count(i) inside the loop, which is O(n^2)
        (the cause of the 3492 ms runtime).  Tracking previously seen values
        in a set makes this a single O(n) pass.  Assumes the problem's
        guarantee that nums contains at least one duplicate.
        """
        seen = set()
        for value in nums:
            if value in seen:
                return value
            seen.add(value)
| # Runtime: 3492 ms, faster than 5.01% of Python3 online submissions for Find the Duplicate Number.
# Memory Usage: 16.7 MB, less than 26.91% of Python3 online submissions for Find the Duplicate Number.
class Solution:
def findDuplicate(self, nums: List[int]) -> int:
for i in nums:
if nums.count(i)>1:
return i
| en | 0.761268 | # Runtime: 3492 ms, faster than 5.01% of Python3 online submissions for Find the Duplicate Number. # Memory Usage: 16.7 MB, less than 26.91% of Python3 online submissions for Find the Duplicate Number. | 3.701344 | 4 |
FED-Python-scripts/eating_rate.py | KravitzLabDevices/FED1 | 4 | 6620025 | <reponame>KravitzLabDevices/FED1
'''
Author: kravitzlab
Date: July 15 2016
Purpose: The application processes multiple files with timestamps(first column of a csv file) corresponding to the
single pellet retrieved by a mouse. It extracts only common full 12 hours daytime and nighttime intervals, in order
to later compare data sets from equal sized nighttime and daytime periods. User can define what were
the nighttime and daytime hours in the experiment. User can also define the time for calculating the eating rate
(between pellets per 1 min and per 2 hours).Then, according to the given parameters, the application plots a bar chart
with the results of analyzis and standard errors, and a statistical significance(ttest), if there is one
('*' for p<0.05, '**' for p<0.01). In addition, the program prints out the values in the console.
'''
'''
Requirements: Anaconda(Python3.5)
Tested on Windows7.
'''
import os, sys
import fnmatch
import tkinter
from tkinter import *
from tkinter import filedialog
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime as dt
from datetime import timedelta
import numpy as np
from scipy.stats import ttest_ind
import math
import csv
# default application variables in the initial options window
fields = ['Time in seconds', 'Lights out hour', 'Lights on hour']
variables = ['3600','15','3'] # 30min interval in seconds(1800sec), lights out at 3pm, lights on at 3am
# function to pop up the information about the problem
# Show a blocking warning window; clicking 'Ok' terminates the program.
def popup_msg(message):
    win = Tk()
    win.wm_title("!")
    Label(win, text=message).pack(side="top", fill="x", pady=10)
    Button(win, text="Ok", command=lambda: sys.exit()).pack()
    win.mainloop()
# function to set variables according to the user input
# Copy the current entry-field values into the global `variables`,
# then stop the options window's event loop.
def fetch(root, entries):
    for position, (_, entry_widget) in enumerate(entries):
        variables[position] = entry_widget.get()
    root.quit()
# function to create the options window with default variables displayed
# Build one labelled Entry row per field, pre-filled with its default value.
# Returns a list of (field_name, Entry) tuples so the caller can read the
# user's input later via fetch().
def take_options(root, fields, variables):
    entries = list()
    for field_name, default_value in zip(fields, variables):
        row = Frame(root)
        label = Label(row, width=20, text=field_name, anchor='w')
        entry = Entry(row)
        row.pack(side=TOP, fill=X, padx=5, pady=5)
        label.pack(side=LEFT)
        entry.pack(side=RIGHT, expand=YES, fill=X)
        entry.insert(20, default_value)
        entries.append((field_name, entry))
    return entries
# create option window with entry fields
option_window = Tk()
option_window.title('Options')
ents = take_options(option_window, fields, variables)
# Pressing <Return> submits the form.  fetch() takes (root, entries); the
# original lambda passed only the entries, which raised a TypeError every
# time Enter was pressed.
option_window.bind('<Return>', (lambda event, e=ents: fetch(option_window, e)))
b1 = Button(option_window, text='OK', command=(lambda e=ents: fetch(option_window, e)))
b1.pack(side=RIGHT, padx=5, pady=5)
b2 = Button(option_window, text='Quit', fg='red',command=sys.exit)
b2.pack(side=LEFT, padx=5, pady=5)
option_window.mainloop()
# Set application constants accordingly
# verify user input; popup_msg() aborts the program on invalid values
try:
    bin = int(variables[0])
    lights_out = int(variables[1])
    lights_on = int(variables[2])
    # the bin must span 1 min - 2 h; hours must be valid 24-hour clock values
    if bin < 60 or bin > 7200 or lights_out < 0 or lights_out >= 24 or lights_on < 0 or lights_on >= 24:
        popup_msg("Time bin has to be 60-7200sec\nHours in 24hour format")
except:
    popup_msg("Wrong input")
# display folders through Tkinter, tkFileDialog
# set the path to the folder according to users choice
src = filedialog.askdirectory()
########################################## functions
# Converts timestamp into a number
# Parse a 'MM/DD/YYYY HH:MM:SS' string and return it as a matplotlib
# date number (float days since the matplotlib epoch).
def convertTime(date):
    parsed = dt.datetime.strptime(date, "%m/%d/%Y %H:%M:%S")
    return md.date2num(parsed)
# get data from a file (only the first column=date)
# takes a csv file as an argument
# returns a list of datetime elements( all timestamps) from this file
# Read one FED csv file and return its pellet timestamps as datetime objects.
# A row counts as a pellet event only when its second column is a non-zero
# integer; header rows and malformed lines are skipped silently.
def get_data(filename):
    timestamps = list()
    with open(filename) as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            try:
                if int(row[1]) != 0:
                    timestamps.append(md.num2date(convertTime(row[0]), tz=None))
            except:
                continue
    return timestamps
# returns a list of lists
# each list contains all timestamps from a single csv file from the folder (e.g. 8files=8lists within returned list)
# it takes a path to the folder as an argument
def read_all(path):
    """Read every *.csv file in `path`.

    Returns a list with one inner list of timestamps per file (each the
    result of get_data).  Pops up an error window -- which normally
    terminates the application -- when nothing could be read.
    """
    # defined before the try so the emptiness checks below can never hit a
    # NameError when os.listdir fails and the popup window is dismissed
    # without exiting
    list_all = list()
    try:  # if user manually points to nonexistent folder
        # os.listdir(path) lists all files in the directory from the given path
        for file in os.listdir(path):
            # process only csv files
            if fnmatch.fnmatch(file, '*.csv'):
                # get_data reads all timestamps from one file; append them
                # as one inner list
                list_all.append(get_data(os.path.join(path, file)))
    except:
        popup_msg("No file was read")
    # check if any data was read
    if len(list_all) == 0:
        popup_msg("No file was read")
    else:
        for i in range(len(list_all)):
            if len(list_all[i]) == 0:
                popup_msg("Some files were not read")
    return list_all
# returns the earliest common date and latest common date
# we are interested only in the common time period
# takes a list of lists of timestamps as an argument (result of read_all function)
# Return (latest first-timestamp, earliest last-timestamp) over all files.
# That pair delimits the largest time window common to every file; data
# outside it exists only in some files and is discarded by the caller.
def get_border_times(list_all):
    starts = [min(timestamps) for timestamps in list_all]
    ends = [max(timestamps) for timestamps in list_all]
    return max(starts), min(ends)
# returns data from start to end date only (a list of lists of timestamps)
# takes as an argument a list of lists of timestamps (result of read_all function)
# and the earliest and latest common dates we want to plot (results of get_border_times function)
def extract_times(list_all, start_date, end_date):
    """Return, per file, only the timestamps lying in [start_date, end_date].

    The previous implementation located the slice borders with list.index(),
    which returns the *first* occurrence of a value and therefore silently
    dropped trailing duplicates of the last in-range timestamp.  Filtering
    directly is equivalent for the (chronologically ordered) FED data and
    has no such edge case.
    """
    extracted_data = list()
    for el in list_all:
        extracted_data.append([t for t in el if start_date <= t <= end_date])
    return extracted_data
# returns list of start-end tuples representing given interval of nighttime hours (number format)
# takes as an argument: a single list of timestamps(one sample file), start_hour=beginning of nighttime,
# end_hour=end of nighttime(24hours:1-00), and start and end time of a whole plot(data from: get_border_times(list_all))
def get_intervals(list_of_timestamps, start_hour, end_hour, earliest, latest):
    """Build the list of nighttime (start, end) pairs in date-number format.

    `list_of_timestamps` is one sample file's timestamps (used only to
    discover which calendar days the experiment covers); `start_hour` /
    `end_hour` delimit the nighttime (24 h clock); `earliest` / `latest`
    are the plot borders.  Intervals are clipped to the plot borders, so
    the first and last ones may be shorter than a full night.
    """
    dates_from_file = list()
    interval = list()
    date2num_begin = md.date2num(earliest) # beginning of plot
    date2num_end = md.date2num(latest) # end of plot
    # check how many dates(calendar days) are in the fed
    for el in list_of_timestamps:
        if el.date() not in dates_from_file:
            dates_from_file.append(el.date())
    # for each date in fed, create start_hour-end_hour pair of night interval (datetime, number format)
    if start_hour >= 12:
        # lights go out in the afternoon: each night ends on the NEXT calendar day
        for i in range(len(dates_from_file)):
            # start interval
            date2num = md.date2num(dt.datetime.combine(dates_from_file[i], dt.time(hour=start_hour)))
            if (i+1) < len(dates_from_file): # makes sure it is not the last interval
                # end interval
                date2num_next = md.date2num(dt.datetime.combine(dates_from_file[i+1], dt.time(hour=end_hour)))
            else: ## it means it is the last interval
                # if there is only one day on the list check if the start interval is later than beginning
                if len(dates_from_file) == 1:
                    temp0 = date2num if date2num >= date2num_begin else date2num_begin
                    interval.append((temp0, date2num_end))
                    break
                else:
                    if date2num <= date2num_end:
                        interval.append((date2num, date2num_end))
                    break
            # if the start interval hour is later than first timestamp, set the beginning of interval to beginning of plot
            if date2num >= date2num_begin:
                temp0 = date2num
                # if the next date is in the list, set it to the end of nighttime, if not set the end of plot to be the end of nighttime
                temp1 = date2num_next if date2num_next <= date2num_end else date2num_end
            # if the start hour on that date was earlier than the plot, set the first available to be the beginning of nighttime
            else:
                temp0 = date2num_begin
                temp1 = date2num_next if date2num_next <= date2num_end else date2num_end
            interval.append((temp0,temp1))
    else: # lights out hour before noon
        # start and end of each night fall on the SAME calendar day
        for i in range(len(dates_from_file)):
            # start interval
            date2num = md.date2num(dt.datetime.combine(dates_from_file[i], dt.time(hour=start_hour)))
            # end interval
            date2num_next = md.date2num(dt.datetime.combine(dates_from_file[i], dt.time(hour=end_hour)))
            if (i == len(dates_from_file) - 1) or i == 0: # for the last interval or if it is the only one
                # if the start interval hour is later than first timestamp, set the beginning of interval to beginning of plot
                if date2num >= date2num_begin:
                    temp0 = date2num
                    # if the next date is in the list, set it to the end of nighttime, if not set the end of plot to be the end of nighttime
                    temp1 = date2num_next if date2num_next <= date2num_end else date2num_end
                # if the start hour on that date was earlier than the plot, set the first available to be the beginning of nighttime
                else:
                    temp0 = date2num_begin
                    temp1 = date2num_next if date2num_next <= date2num_end else date2num_end
                interval.append((temp0,temp1))
            else: # if it is not the last or first interval
                interval.append((date2num,date2num_next))
    return interval
# returns daytime intervals based on nights
# it takes as arguments start and end time of a whole plot(data from: get_border_times(list_all))=earliesr, latest
# and nighttime intervals(result of get_intervals)
def reverse_intervals(earliest, latest, interval):
    """Return the daytime intervals as the complement of the night intervals.

    `interval` is the (start, end) list produced by get_intervals (date
    numbers); `earliest` / `latest` are the plot borders as datetimes.
    The gaps between consecutive nights -- plus any leading/trailing gap
    against the plot borders -- become the daytime intervals.
    """
    daytime = list()
    # NOTE: the parameters are rebound from datetimes to date numbers here
    earliest = md.date2num(earliest) # convert plot start to a date number
    latest = md.date2num(latest)
    for i in range(len(interval)):
        if (i+1) < len(interval): # if it is not the last interval and there are more than 1 intervals
            if i == 0: # if it is the first one
                if earliest < interval[i][0]:
                    # day before the first night, then the gap to the next night
                    daytime.append((earliest, interval[i][0]))
                    daytime.append((interval[i][1],interval[i+1][0]))
                else:
                    daytime.append((interval[i][1],interval[i+1][0]))
            else:
                # middle intervals: the gap between this night and the next
                daytime.append((interval[i][1], interval[i+1][0]))
        else: # it is last one
            if len(interval) == 1: # if there was only one
                if earliest < interval[i][0]:
                    daytime.append((earliest, interval[i][0]))
                if interval[i][1] < latest:
                    daytime.append((interval[i][1], latest))
            else: # last but there were more than one
                if interval[i][1] < latest:
                    daytime.append((interval[i][1], latest))
    return daytime
# look for full 12 hour night periods
# Keep only the (start, end) pairs that span exactly 12 hours, so that
# night and day periods of identical length can be compared.
def get_12h_intervals(interval):
    full_length = 43200  # 12 hours expressed in seconds
    return [el for el in interval
            if (md.num2date(el[1]) - md.num2date(el[0])).total_seconds() == full_length]
# returns average eating rate and standard error, and data to error(for ttest)
# takes as argument extracted data(list of common timestamps for all files) and result of get_12h_intervals function
def get_nights_rate(extracted_data, full_nights):
    """Average eating rate over the given 12 h intervals.

    Despite the name this works for day intervals too -- the caller passes
    either full_nights_only or full_days_only.  Returns a 3-tuple:
    (mean pellets per bin, standard error, per-file-per-interval rates);
    the last list feeds the t-test.  Relies on the module-level `bin`
    (bin width in seconds) set from the options window.
    """
    only_nights = [] # divide extracted data into single night (or day) intervals
    for el in full_nights:
        start, end = el
        only_nights.append(extract_times(extracted_data, md.num2date(start), md.num2date(end)))
    all_bins = [] # fill the bins for each night (or day) separately
    for el in only_nights:
        the_oldest, the_newest = get_border_times(el)
        how_many_bins = get_number_of_bins(the_newest, the_oldest, bin)
        all_bins.append(fill_bins(how_many_bins, el, the_oldest, bin))
    # calculate rates for each night/day
    rates_per_night = [get_rate(all_bins[i]) for i in range(len(all_bins))]
    # extract from the above tuples only rates
    rates = [rates_per_night[i][0] for i in range(len(rates_per_night))]
    avg = sum(rates)/len(rates_per_night) # calculate total average rate
    # concatenate all data (from all nights or days) for std error and ttest
    data2err = []
    for el in rates_per_night:
        data2err.extend(el[1])
    return avg, my_std_err(data2err), data2err
# returns full 12hour nights and days timestamps, where number of days = number of nights
def get_days_and_nights(extracted_data, full_nights, full_days):
    """Trim to an equal number of full nights and days; return matching data.

    NOTE: mutates `full_nights` / `full_days` in place (trailing intervals
    are deleted from the longer list), so callers observe the trimmed
    lists afterwards -- the main script relies on this side effect.
    """
    # make full nights equal full days
    while (len(full_days) != len(full_nights)):
        if len(full_days) > len(full_nights):
            del full_days[-1]
        else:
            del full_nights[-1]
    # overall window: from the earlier of the two first starts to the later
    # of the two last ends
    start = full_nights[0][0] if full_nights[0][0] < full_days[0][0] else full_days[0][0]
    end = full_nights[-1][1] if full_nights[-1][1] > full_days[-1][1] else full_days[-1][1]
    return extract_times(extracted_data, md.num2date(start), md.num2date(end))
# function to find number of bins given 2 times and a desired time interval
# time difference is a timedelta type, it is first converted to seconds and divided by interval in seconds
def get_number_of_bins (latest, earliest, tinterval):
return int(math.floor((latest-earliest).total_seconds()/tinterval))
# fill each bin(number of bins=number of time intervals) according to the data from each file
# returns list of lists of bins (number of lists=number of files)
# takes as arguments number of all intervals(bins calculated from get_number_of_bins function),
# list of lists of timestamps (result of extract_times function), earliest common date, and time interval(e.g. 1hour=3600sec) in seconds
def fill_bins(intervalsNo, list_all, earliest, interval):
    """Count timestamps per time bin for every file.

    Returns one numpy array of length `intervalsNo` per inner list of
    `list_all`; element k counts how many of that file's timestamps fall
    into the k-th `interval`-second bin measured from `earliest`.
    """
    # create one zero-filled bin array per file
    list_of_bins = [np.zeros(intervalsNo) for i in range(len(list_all))]
    # fill the empty bins with timestamp counts
    for i in range(len(list_all)):
        for j in range(len(list_all[i])):
            tick = get_number_of_bins(list_all[i][j], earliest, interval)
            # indexes start from 0, hence tick - 1; the lower-bound guard
            # keeps a timestamp at (or before) `earliest` (tick <= 0) from
            # wrapping around to the LAST bin via negative indexing, which
            # the original `tick-1 < intervalsNo` check allowed
            if 0 <= tick - 1 < intervalsNo:
                list_of_bins[i][tick - 1] += 1
    return list_of_bins
# returns a tuple of average rate and data to calculate std error
def get_rate(list_of_bins):
individual_rates = [sum(list_of_bins[i])/len(list_of_bins[i]) for i in range(len(list_of_bins))]
return sum(individual_rates)/len(individual_rates), individual_rates
# my std error function to calculate standard errors from given list
def my_std_err(my_list):
temp = 0
average = sum(my_list)/len(my_list)
for i in range(len(my_list)):
temp = temp + math.pow((my_list[i]-average), 2)
try:
std_dev = math.sqrt(temp)/math.sqrt(len(my_list)-1)
std_err = std_dev/math.sqrt(len(my_list))
except:
std_err = -1
return std_err
############################################### extracting data and calculations
# read all csv files from the folder in the given path=get data in the form of list of lists
# each list contains all timestamps from a single csv file
my_data = read_all(src)
start, end = get_border_times(my_data) # get first and last common date from all data
common_data = extract_times(my_data, start, end) # extract only common dates from all data to plot
nights = get_intervals(common_data[0], lights_out, lights_on, start, end) # get nighttime intervals
days = reverse_intervals(start, end, nights) #daytime intervals
full_nights_only = get_12h_intervals(nights) # list of tuples of start and end time of each night interval)
full_days_only = get_12h_intervals(days) # list of tuples of start and end time of each day interval)
common_days_nights = get_days_and_nights(common_data, full_nights_only, full_days_only) # equal number of days and nights
############################### print the analyzis in the console
do_stats = True # boolean to skip the stats if there was not enough information
night_rate, night_error, night2ttest = get_nights_rate(common_days_nights, full_nights_only)
print ("Pellets per hour by night: ", night_rate, "err: ", night_error)
day_rate, day_error, day2ttest = get_nights_rate(common_days_nights, full_days_only)
print ("Pellets per hour by night: ", day_rate,"err: ", day_error)
# ttest
# check if there was enough information to calculate the stats
if night_error == -1 or night_error == 0 or day_error == -1 or day_error == 0:
do_stats = False
popup = Tk()
popup.wm_title("!")
label = Label(popup, text="Not enough data to calculate\nstandard error and significance!\n\nPress 'ok' in Options window again\nto see the plot anyway.")
label.pack(side="top", fill="x", pady=10)
B1 = Button(popup, text="Ok", command = lambda: popup.withdraw())
B1.pack()
popup.mainloop()
else:
ttest, p = ttest_ind(night2ttest, day2ttest)
print ("p = ", p)
############################################################## plot
N = 2 # number of bars to plot(dark and light)
fig = plt.figure(facecolor='w')
x = np.arange(N) # arrange columns
ax1 = plt.subplot2grid((1,1),(0,0))
plt.ylabel('Eating rate (pellets/hour)')
ax1.set_frame_on(False)
y = [night_rate, day_rate]
if do_stats == True:
# yerr first in tuple is to first colunm second to second,
# first tuple is for positive values, second for negative
# drk, lght = plt.bar(x, y, width = 0.7, yerr=[(10,2),(10,2)])
drk, lght = plt.bar(x, y, width = 0.7, yerr=[(night_error,day_error),(night_error,day_error)], ecolor='k')
else:
drk, lght = plt.bar(x, y, width = 0.7)
centers = x + 0.5*drk.get_width() # align labels in the center
ax1.set_xticks(centers)
drk.set_facecolor('0.85') # shade of gray
lght.set_facecolor('w')
ax1.set_xticklabels(['Dark', 'Light'])
if do_stats == True:
# check p < 0.01(**), p < 0.05(*)
if p < 0.05:
text = '*' if p >= 0.01 else '**'
a = (centers[0] + centers[1])/2
b = 1.05*max(y[0],y[1])
dx = abs(centers[0]-centers[1])
props = {'connectionstyle':'bar','arrowstyle':'-',\
'shrinkA':20,'shrinkB':20,'lw':1}
# position the text in the middle on the top of the bar
ax1.annotate(text, xy=(centers[0]+(dx/2.2),1.5*b), zorder=10)
ax1.annotate('', xy=(centers[0],b), xytext=(centers[1],b), arrowprops=props)
plt.ylim(ymax=b+(0.6*b))
plt.show()
| '''
Author: kravitzlab
Date: July 15 2016
Purpose: The application processes multiple files with timestamps(first column of a csv file) corresponding to the
single pellet retrieved by a mouse. It extracts only common full 12 hours daytime and nighttime intervals, in order
to later compare data sets from equal sized nighttime and daytime periods. User can define what were
the nighttime and daytime hours in the experiment. User can also define the time for calculating the eating rate
(between pellets per 1 min and per 2 hours).Then, according to the given parameters, the application plots a bar chart
with the results of the analysis and standard errors, and a statistical significance marker (t-test), if there is one
('*' for p<0.05, '**' for p<0.01). In addition, the program prints out the values in the console.
'''
'''
Requirements: Anaconda(Python3.5)
Tested on Windows7.
'''
import os, sys
import fnmatch
import tkinter
from tkinter import *
from tkinter import filedialog
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime as dt
from datetime import timedelta
import numpy as np
from scipy.stats import ttest_ind
import math
import csv
# default application variables in the initial options window
fields = ['Time in seconds', 'Lights out hour', 'Lights on hour']
variables = ['3600','15','3'] # 30min interval in seconds(1800sec), lights out at 3pm, lights on at 3am
# function to pop up the information about the problem
def popup_msg(message):
popup = Tk()
popup.wm_title("!")
label = Label(popup, text=message)
label.pack(side="top", fill="x", pady=10)
B1 = Button(popup, text="Ok", command = lambda: sys.exit())
B1.pack()
popup.mainloop()
# function to set variables according to the user input
def fetch(root,entries):
for i in range(len(entries)):
variables[i] = entries[i][1].get()
root.quit()
# function to create the options window with default variables displayed
def take_options(root, fields, variables):
entries = list()
for i in range(len(fields)):
row = Frame(root)
lab = Label(row, width=20, text=fields[i], anchor='w')
ent = Entry(row)
row.pack(side=TOP, fill=X, padx=5, pady=5)
lab.pack(side=LEFT)
ent.pack(side=RIGHT, expand=YES, fill=X)
ent.insert(20, variables[i])
entries.append((fields[i], ent))
return entries
# create option window with entry fields
option_window = Tk()
option_window.title('Options')
ents = take_options(option_window, fields, variables)
option_window.bind('<Return>', (lambda event, e=ents: fetch(e)))
b1 = Button(option_window, text='OK', command=(lambda e=ents: fetch(option_window, e)))
b1.pack(side=RIGHT, padx=5, pady=5)
b2 = Button(option_window, text='Quit', fg='red',command=sys.exit)
b2.pack(side=LEFT, padx=5, pady=5)
option_window.mainloop()
# Set application constants accordingly
# verify user input
try:
bin = int(variables[0])
lights_out = int(variables[1])
lights_on = int(variables[2])
if bin < 60 or bin > 7200 or lights_out < 0 or lights_out >= 24 or lights_on < 0 or lights_on >= 24:
popup_msg("Time bin has to be 60-7200sec\nHours in 24hour format")
except:
popup_msg("Wrong input")
# display folders through Tkinter, tkFileDialog
# set the path to the folder according to users choice
src = filedialog.askdirectory()
########################################## functions
# Converts timestamp into a number
def convertTime(date):
return md.date2num(dt.datetime.strptime(date, "%m/%d/%Y %H:%M:%S"))
# get data from a file (only the first column=date)
# takes a csv file as an argument
# returns a list of datetime elements( all timestamps) from this file
def get_data(filename):
my_cols = list()
with open(filename) as csvfile:
the_data = csv.reader(csvfile, delimiter=',')
for line in the_data:
try:
if int(line[1]) != 0:
my_cols.append(md.num2date(convertTime(line[0]), tz=None))
except:
continue
return my_cols
# returns a list of lists
# each list contains all timestamps from a single csv file from the folder (e.g. 8files=8lists within returned list)
# it takes a path to the folder as an argument
def read_all(path):
    """Read every ``*.csv`` file in `path` via get_data().

    Returns a list with one timestamp list per file.  Pops up a warning
    dialog when nothing could be read or when individual files came back
    empty (popup_msg appears to only warn, not abort).

    Fix: ``list_all`` is now initialised BEFORE the try block -- in the
    original it was created inside it, so a failing os.listdir() (user
    pointing to a nonexistent folder) made the subsequent
    ``len(list_all)`` raise NameError instead of showing the
    "No file was read" message.
    """
    list_all = list()
    try:  # the path may not exist if the user pointed at a bad folder
        for file in os.listdir(path):
            # search only those that are csv files
            if fnmatch.fnmatch(file, '*.csv'):
                # get_data() reads all timestamps from one file
                list_all.append(get_data(os.path.join(path, file)))
    except:
        popup_msg("No file was read")
    # check if any data was read
    if len(list_all) == 0:
        popup_msg("No file was read")
    else:
        for timestamps in list_all:
            if len(timestamps) == 0:
                popup_msg("Some files were not read")
    return list_all
# returns the start and end of the time span common to all files
def get_border_times(list_all):
    """Return (start, end) of the time period shared by every file.

    list_all: list of lists of timestamps (one inner list per file).
    The common span begins at the latest first-timestamp and ends at the
    earliest last-timestamp across all files.
    """
    first_stamps = [min(timestamps) for timestamps in list_all]
    last_stamps = [max(timestamps) for timestamps in list_all]
    return max(first_stamps), min(last_stamps)
# returns data from start to end date only (a list of lists of timestamps)
def extract_times(list_all, start_date, end_date):
    """Clip every file's timestamp list to [start_date, end_date].

    list_all: list of lists of sorted timestamps (one inner list per
    file).  Returns a list of the same shape containing only timestamps
    inside the closed interval; files without any matching timestamp
    yield an empty list.

    Fix: the original located the slice borders with ``list.index()``,
    which returns the FIRST occurrence of a value -- with duplicate
    timestamps at the window edge the trailing duplicates were cut off.
    Indices are now taken from the enumeration itself.
    """
    extracted_data = list()
    for el in list_all:
        start_index = 0
        end_index = 0
        # first timestamp inside the window
        for idx, timestamp in enumerate(el):
            if start_date <= timestamp <= end_date:
                start_index = idx
                break
        # last timestamp inside the window (scan from the back)
        for idx in range(len(el) - 1, -1, -1):
            if start_date <= el[idx] <= end_date:
                end_index = idx + 1  # +1 so the slice includes it
                break
        # append only the part from start to end date
        extracted_data.append(el[start_index:end_index])
    return extracted_data
# returns list of start-end tuples representing given interval of nighttime hours (number format)
# takes as an argument: a single list of timestamps(one sample file), start_hour=beginning of nighttime,
# end_hour=end of nighttime(24hours:1-00), and start and end time of a whole plot(data from: get_border_times(list_all))
def get_intervals(list_of_timestamps, start_hour, end_hour, earliest, latest):
    """Build the nighttime (start, end) pairs, as Matplotlib date numbers,
    clipped to the plotted range [earliest, latest].

    The first branch handles lights-out at/after noon (a night spans two
    calendar days); the else branch handles lights-out before noon (a
    night falls within one calendar day).
    """
    dates_from_file = list()
    interval = list()
    date2num_begin = md.date2num(earliest) # beginning of plot
    date2num_end = md.date2num(latest) # end of plot
    # check how many dates(calendar days) are in the fed
    for el in list_of_timestamps:
        if el.date() not in dates_from_file:
            dates_from_file.append(el.date())
    # for each date in fed, create start_hour-end_hour pair of night interval (datetime, number format)
    if start_hour >= 12:
        for i in range(len(dates_from_file)):
            # start interval
            date2num = md.date2num(dt.datetime.combine(dates_from_file[i], dt.time(hour=start_hour)))
            if (i+1) < len(dates_from_file): # makes sure it is not the last inteval
                # end interval
                date2num_next = md.date2num(dt.datetime.combine(dates_from_file[i+1], dt.time(hour=end_hour)))
            else: ## it means it is the last interval
                # if there is only one day on the list check if the start interval is later than beginning
                if len(dates_from_file) == 1:
                    temp0 = date2num if date2num >= date2num_begin else date2num_begin
                    interval.append((temp0, date2num_end))
                    break
                else:
                    if date2num <= date2num_end:
                        interval.append((date2num, date2num_end))
                    break
            # if the start interval hour is later than first timestamp, set the beginning of interval to beginning of plot
            if date2num >= date2num_begin:
                temp0 = date2num
                # if the next date is in the list, set it to the end of nighttime, if not set the end of plot to be the end of nighttime
                temp1 = date2num_next if date2num_next <= date2num_end else date2num_end
            # if the start hour on that date was earlier than the plot, set the first available to be the beginning of nighttime
            else:
                temp0 = date2num_begin
                temp1 = date2num_next if date2num_next <= date2num_end else date2num_end
            interval.append((temp0,temp1))
    else: # lights out hour before noon
        for i in range(len(dates_from_file)):
            # start interval
            date2num = md.date2num(dt.datetime.combine(dates_from_file[i], dt.time(hour=start_hour)))
            # end interval
            date2num_next = md.date2num(dt.datetime.combine(dates_from_file[i], dt.time(hour=end_hour)))
            if (i == len(dates_from_file) - 1) or i == 0: # for the last interval or if it is the only one
                # if the start interval hour is later than first timestamp, set the beginning of interval to beginning of plot
                if date2num >= date2num_begin:
                    temp0 = date2num
                    # if the next date is in the list, set it to the end of nighttime, if not set the end of plot to be the end of nighttime
                    temp1 = date2num_next if date2num_next <= date2num_end else date2num_end
                # if the start hour on that date was earlier than the plot, set the first available to be the beginning of nighttime
                else:
                    temp0 = date2num_begin
                    temp1 = date2num_next if date2num_next <= date2num_end else date2num_end
                interval.append((temp0,temp1))
            else: # if it is not the last or first interval
                interval.append((date2num,date2num_next))
    return interval
# returns daytime intervals based on nights
# it takes as arguments start and end time of a whole plot(data from: get_border_times(list_all))=earliesr, latest
# and nighttime intervals(result of get_intervals)
def reverse_intervals(earliest, latest, interval):
    """Return the complement of the nighttime intervals: the daytime
    (start, end) pairs between `earliest` and `latest`, as Matplotlib
    date numbers.  The gaps before the first and after the last night are
    included when the plot extends beyond them."""
    daytime = list()
    earliest = md.date2num(earliest) # beginning of plot, convert to date
    latest = md.date2num(latest)
    for i in range(len(interval)):
        if (i+1) < len(interval): # if it is not the last interval and there are more than 1 intervals
            if i == 0: # if it is the first one
                if earliest < interval[i][0]:
                    daytime.append((earliest, interval[i][0]))
                    daytime.append((interval[i][1],interval[i+1][0]))
                else:
                    daytime.append((interval[i][1],interval[i+1][0]))
            else:
                daytime.append((interval[i][1], interval[i+1][0]))
        else: # it is last one
            if len(interval) == 1: # if there was only one
                if earliest < interval[i][0]:
                    daytime.append((earliest, interval[i][0]))
                if interval[i][1] < latest:
                    daytime.append((interval[i][1], latest))
            else: # last but there were more than one
                if interval[i][1] < latest:
                    daytime.append((interval[i][1], latest))
    return daytime
# look for full 12 hour periods
def get_12h_intervals(interval):
    """Return only the (start, end) pairs that span exactly 12 hours."""
    TWELVE_HOURS = 43200  # seconds
    full_intervals = list()
    for start, end in interval:
        # convert the date numbers back to datetimes to compare durations
        if (md.num2date(end) - md.num2date(start)).total_seconds() == TWELVE_HOURS:
            full_intervals.append((start, end))
    return full_intervals
# returns average eating rate and standard error, and data to error(for ttest)
# takes as argument extracted data(list of common timestamps for all files) and result of get_12h_intervals function
def get_nights_rate(extracted_data, full_nights):
    """Average pellets-per-bin eating rate over the given intervals.

    Returns (average rate, standard error, concatenated per-interval
    rates for the t-test).  NOTE: relies on the module-level global
    ``bin`` (bin size in seconds) set from the options dialog.
    """
    only_nights = [] # divide extracted data into single night (or day) intervals
    for el in full_nights:
        start, end = el
        only_nights.append(extract_times(extracted_data, md.num2date(start), md.num2date(end)))
    all_bins = [] # fill the bins for each night (or day) separately
    for el in only_nights:
        the_oldest, the_newest = get_border_times(el)
        how_many_bins = get_number_of_bins(the_newest, the_oldest, bin)
        all_bins.append(fill_bins(how_many_bins, el, the_oldest, bin))
    # calculate rates for each night/day
    rates_per_night = [get_rate(all_bins[i]) for i in range(len(all_bins))]
    # extract from the above tuples only rates
    rates = [rates_per_night[i][0] for i in range(len(rates_per_night))]
    avg = sum(rates)/len(rates_per_night) # calculate total average rate
    # concatenate all data (from all nights or days) for std error and ttest
    data2err = []
    for el in rates_per_night:
        data2err.extend(el[1])
    return avg, my_std_err(data2err), data2err
# returns full 12hour nights and days timestamps, where number of days = number of nights
def get_days_and_nights(extracted_data, full_nights, full_days):
    """Trim to an equal count of day and night intervals, then return the
    timestamps spanning from the first to the last of those intervals.

    NOTE: truncates `full_nights` / `full_days` IN PLACE, so the caller's
    lists are shortened as a side effect.
    """
    # make full nights equal full days
    while (len(full_days) != len(full_nights)):
        if len(full_days) > len(full_nights):
            del full_days[-1]
        else:
            del full_nights[-1]
    start = full_nights[0][0] if full_nights[0][0] < full_days[0][0] else full_days[0][0]
    end = full_nights[-1][1] if full_nights[-1][1] > full_days[-1][1] else full_days[-1][1]
    return extract_times(extracted_data, md.num2date(start), md.num2date(end))
# function to find number of bins given 2 times and a desired time interval
def get_number_of_bins(latest, earliest, tinterval):
    """Return how many whole `tinterval`-second bins fit between
    `earliest` and `latest` (datetime-like values whose difference is a
    timedelta)."""
    elapsed_seconds = (latest - earliest).total_seconds()
    return int(elapsed_seconds // tinterval)
# fill each bin (number of bins = number of time intervals) with timestamp counts
def fill_bins(intervalsNo, list_all, earliest, interval):
    """Histogram the timestamps of every file into `intervalsNo` bins of
    `interval` seconds starting at `earliest`.

    Returns one numpy count array per file.

    Fix: the original only checked ``tick-1 < intervalsNo``; a timestamp
    falling into the very first interval gives ``tick == 0`` and the
    index ``-1`` silently wrapped around to the LAST bin, miscounting it.
    A lower bound now drops those events instead.
    NOTE(review): the ``tick-1`` indexing convention still skips events
    in the first interval entirely -- confirm the intended bin alignment.
    """
    # create empty bins according to the number of intervals
    list_of_bins = [np.zeros(intervalsNo) for i in range(len(list_all))]
    # fill the empty bins with timestamp count
    for i in range(len(list_all)):
        for timestamp in list_all[i]:
            tick = get_number_of_bins(timestamp, earliest, interval)
            # indexes start from 0, hence tick-1; guard BOTH bounds
            if 0 <= tick - 1 < intervalsNo:
                list_of_bins[i][tick - 1] += 1
    return list_of_bins
# returns a tuple of average rate and data to calculate std error
def get_rate(list_of_bins):
    """Average the per-file bin counts.

    Returns (overall average, [per-file average, ...]); the per-file
    list feeds the standard-error / t-test computations.
    """
    individual_rates = []
    for bins in list_of_bins:
        individual_rates.append(sum(bins) / len(bins))
    overall = sum(individual_rates) / len(individual_rates)
    return overall, individual_rates
# my std error function to calculate standard errors from given list
def my_std_err(my_list):
    """Standard error of the mean of `my_list`.

    Uses the sample standard deviation (n-1 denominator); returns -1
    when it cannot be computed (fewer than two values).
    """
    n = len(my_list)
    average = sum(my_list) / n
    squared_dev = sum(math.pow(value - average, 2) for value in my_list)
    try:
        std_dev = math.sqrt(squared_dev) / math.sqrt(n - 1)
        std_err = std_dev / math.sqrt(n)
    except:
        # n == 1 -> division by zero -> flag with -1 (checked by caller)
        std_err = -1
    return std_err
############################################### extracting data and calculations
# read all csv files from the folder in the given path=get data in the form of list of lists
# each list contains all timestamps from a single csv file
my_data = read_all(src)
start, end = get_border_times(my_data) # get first and last common date from all data
common_data = extract_times(my_data, start, end) # extract only common dates from all data to plot
nights = get_intervals(common_data[0], lights_out, lights_on, start, end) # get nighttime intervals
days = reverse_intervals(start, end, nights) # daytime intervals
full_nights_only = get_12h_intervals(nights) # list of (start, end) tuples of each full night interval
full_days_only = get_12h_intervals(days) # list of (start, end) tuples of each full day interval
common_days_nights = get_days_and_nights(common_data, full_nights_only, full_days_only) # equal number of days and nights
############################### print the analysis in the console
do_stats = True # boolean to skip the stats if there was not enough information
night_rate, night_error, night2ttest = get_nights_rate(common_days_nights, full_nights_only)
print ("Pellets per hour by night: ", night_rate, "err: ", night_error)
day_rate, day_error, day2ttest = get_nights_rate(common_days_nights, full_days_only)
# fix: this line previously printed "by night" for the daytime rate
print ("Pellets per hour by day: ", day_rate, "err: ", day_error)
# ttest
# check if there was enough information to calculate the stats
if night_error == -1 or night_error == 0 or day_error == -1 or day_error == 0:
    do_stats = False
    popup = Tk()
    popup.wm_title("!")
    label = Label(popup, text="Not enough data to calculate\nstandard error and significance!\n\nPress 'ok' in Options window again\nto see the plot anyway.")
    label.pack(side="top", fill="x", pady=10)
    B1 = Button(popup, text="Ok", command = lambda: popup.withdraw())
    B1.pack()
    popup.mainloop()
else:
    ttest, p = ttest_ind(night2ttest, day2ttest)
    print ("p = ", p)
############################################################## plot
N = 2 # number of bars to plot (dark and light)
fig = plt.figure(facecolor='w')
x = np.arange(N) # arrange columns
ax1 = plt.subplot2grid((1,1),(0,0))
plt.ylabel('Eating rate (pellets/hour)')
ax1.set_frame_on(False)
y = [night_rate, day_rate]
if do_stats == True:
    # yerr: first entry of each tuple belongs to the first column, second
    # to the second; first tuple is positive errors, second negative
    drk, lght = plt.bar(x, y, width = 0.7, yerr=[(night_error,day_error),(night_error,day_error)], ecolor='k')
else:
    drk, lght = plt.bar(x, y, width = 0.7)
centers = x + 0.5*drk.get_width() # align labels in the center
ax1.set_xticks(centers)
drk.set_facecolor('0.85') # shade of gray
lght.set_facecolor('w')
ax1.set_xticklabels(['Dark', 'Light'])
if do_stats == True:
    # annotate significance: p < 0.05 -> '*', p < 0.01 -> '**'
    # (p only exists when do_stats is True, so this guard is required)
    if p < 0.05:
        text = '*' if p >= 0.01 else '**'
        b = 1.05*max(y[0],y[1])
        dx = abs(centers[0]-centers[1])
        props = {'connectionstyle':'bar','arrowstyle':'-',
                 'shrinkA':20,'shrinkB':20,'lw':1}
        # position the text in the middle on the top of the bar
        ax1.annotate(text, xy=(centers[0]+(dx/2.2),1.5*b), zorder=10)
        ax1.annotate('', xy=(centers[0],b), xytext=(centers[1],b), arrowprops=props)
        # NOTE(review): the 'ymax' keyword is accepted by older Matplotlib;
        # newer releases spell it plt.ylim(top=...) -- confirm the pinned version.
        plt.ylim(ymax=b+(0.6*b))
plt.show()
Date: July 15 2016
Purpose: The application processes multiple files with timestamps(first column of a csv file) corresponding to the
single pellet retrieved by a mouse. It extracts only common full 12 hours daytime and nighttime intervals, in order
to later compare data sets from equal sized nighttime and daytime periods. User can define what were
the nighttime and daytime hours in the experiment. User can also define the time for calculating the eating rate
(between pellets per 1 min and per 2 hours).Then, according to the given parameters, the application plots a bar chart
with the results of analyzis and standard errors, and a statistical significance(ttest), if there is one
('*' for p<0.05, '**' for p<0.01). In addition, the program prints out the values in the console. Requirements: Anaconda(Python3.5)
Tested on Windows7. # default application variables in the initial options window # 30min interval in seconds(1800sec), lights out at 3pm, lights on at 3am # function to pop up the information about the problem # function to set variables according to the user input # function to create the options window with default variables displayed # create option window with entry fields # Set application constants accordingly # verify user input # display folders through Tkinter, tkFileDialog # set the path to the folder according to users choice ########################################## functions # Converts timestamp into a number # get data from a file (only the first column=date) # takes a csv file as an argument # returns a list of datetime elements( all timestamps) from this file # returns a list of lists # each list contains all timestamps from a single csv file from the folder (e.g. 8files=8lists within returned list) # it takes a path to the folder as an argument # if user manually points to nonexistent folder # os.listdir(path) lists all files in the directory form the given path # search only those that are csv files # get_data(filename) function will now read all of the timestamps from one fille # and add it in the form of list to the list_all # check if any data was read # returns the earliest common date and latest common date # we are interested only in the common time period # takes a list of lists of timestamps as an argument (result of read_all function) # append only the first timestamps from each file # append only the last timestamps from each file # the latest date in all_start_dates will be the earliest common date # find the earliest common # the earliest date in all_end_dates will be the latest common date # find the latest common # returns data from start to end date only (a list of lists of timestamps) # takes as an argument a list of lists of timestamps (result of read_all function) # and the earliest and latest common dates we want to plot 
(results of get_border_times function) # as soon as it finds start date, it stops iterating further # get the index for the start date in that list # as soon as it finds end date, it stops iterating # get the index for the end date in that list # add 1 for the list slicing to include that index # append only lists from start to end date # returns list of start-end tuples representing given interval of nighttime hours (number format) # takes as an argument: a single list of timestamps(one sample file), start_hour=beginning of nighttime, # end_hour=end of nighttime(24hours:1-00), and start and end time of a whole plot(data from: get_border_times(list_all)) # beginning of plot # end of plot # check how many dates(calendar days) are in the fed # for each date in fed, create start_hour-end_hour pair of night interval (datetime, number format) # start interval # makes sure it is not the last inteval # end interval ## it means it is the last interval # if there is only one day on the list check if the start interval is later than beginning # if the start interval hour is later than first timestamp, set the beginning of interval to beginning of plot # if the next date is in the list, set it to the end of nighttime, if not set the end of plot to be the end of nighttime # if the start hour on that date was earlier than the plot, set the first available to be the beginning of nighttime # lights out hour before noon # start interval # end interval # for the last interval or if it is the only one # if the start interval hour is later than first timestamp, set the beginning of interval to beginning of plot # if the next date is in the list, set it to the end of nighttime, if not set the end of plot to be the end of nighttime # if the start hour on that date was earlier than the plot, set the first available to be the beginning of nighttime # if it is not the last or first interval # returns daytime intervals based on nights # it takes as arguments start and end time of a whole 
plot(data from: get_border_times(list_all))=earliesr, latest # and nighttime intervals(result of get_intervals) # beginning of plot, convert to date # if it is not the last interval and there are more than 1 intervals # if it is the first one # it is last one # if there was only one # last but there were more than one # look for full 12 hour night periods # convert time number to date in order to compare, 43200sec=12hours # returns average eating rate and standard error, and data to error(for ttest) # takes as argument extracted data(list of common timestamps for all files) and result of get_12h_intervals function # divide extracted data into single night (or day) intervals # fill the bins for each night (or day) separately # calculate rates for each night/day # extract from the above tuples only rates # calculate total average rate # concatenate all data (from all nights or days) for std error and ttest # returns full 12hour nights and days timestamps, where number of days = number of nights # make full nights equal full days # function to find number of bins given 2 times and a desired time interval # time difference is a timedelta type, it is first converted to seconds and divided by interval in seconds # fill each bin(number of bins=number of time intervals) according to the data from each file # returns list of lists of bins (number of lists=number of files) # takes as arguments number of all intervals(bins calculated from get_number_of_bins function), # list of lists of timestamps (result of extract_times function), earliest common date, and time interval(e.g. 
1hour=3600sec) in seconds # create empty bins accorcing to the number of intervals # fill the empty bins with timestamp count # subtract 1 from index=tick, because indexes start from 0 # returns a tuple of average rate and data to calculate std error # my std error function to calculate standard errors from given list ############################################### extracting data and calculations # read all csv files from the folder in the given path=get data in the form of list of lists # each list contains all timestamps from a single csv file # get first and last common date from all data # extract only common dates from all data to plot # get nighttime intervals #daytime intervals # list of tuples of start and end time of each night interval) # list of tuples of start and end time of each day interval) # equal number of days and nights ############################### print the analyzis in the console # boolean to skip the stats if there was not enough information # ttest # check if there was enough information to calculate the stats ############################################################## plot # number of bars to plot(dark and light) # arrange columns # yerr first in tuple is to first colunm second to second, # first tuple is for positive values, second for negative # drk, lght = plt.bar(x, y, width = 0.7, yerr=[(10,2),(10,2)]) # align labels in the center # shade of gray # check p < 0.01(**), p < 0.05(*) # position the text in the middle on the top of the bar | 3.600949 | 4 |
pandapower/test/test_results.py | lucassm/cigre-montecarlo | 0 | 6620026 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy
# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import pandapower as pp
import pytest
from pandapower.test.consistency_checks import runpp_with_consistency_checks
from pandapower.test.result_test_network_generator import add_test_enforce_qlims, add_test_gen
def test_line(result_test_network):
    """Check line power-flow results against PowerFactory reference
    values for a loaded line, a line behind an open switch, and an
    out-of-service line."""
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_line"]
    lines = [x for x in net.line.index if net.line.from_bus[x] in buses.index]
    l1, l2, l3 = lines[0], lines[1], lines[2]
    b2 = buses.index[1]
    # PowerFactory reference values per line:
    # (loading %, i_ka, p_from_kw, q_from_kvar, p_to_kw, q_to_kvar)
    expected = {
        l1: (14.578, 0.04665, 1202.21, 167.390, -1200.000, -1100.000),
        l2: (8.385, 0.0134, 0.132, -469.371, 0.000, 0.0000),  # open switch line
        l3: (0, 0, 0, 0, 0, 0),  # out of service line
    }
    for line, (load, ika, p_from, q_from, p_to, q_to) in expected.items():
        assert abs(net.res_line.loading_percent.at[line] - load) < 1e-2
        assert abs(net.res_line.i_ka.at[line] - ika) < 1e-2
        assert abs(net.res_line.p_from_kw.at[line] - p_from) < 1e-2
        assert abs(net.res_line.q_from_kvar.at[line] - q_from) < 1e-2
        assert abs(net.res_line.p_to_kw.at[line] - p_to) < 1e-2
        assert abs(net.res_line.q_to_kvar.at[line] - q_to) < 1e-2
    # bus voltage behind the open switch
    assert abs(net.res_bus.vm_pu.at[b2] - 1.007395422) < 1e-8
def test_load_sgen(result_test_network):
    """Check load and static generator results against PowerFactory
    reference values."""
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_load_sgen"]
    loads = [x for x in net.load.index if net.load.bus[x] in buses.index]
    sgens = [x for x in net.sgen.index if net.sgen.bus[x] in buses.index]
    l1 = loads[0]
    sg1 = sgens[0]
    b2 = buses.index[1]
    # PowerFactory reference values
    pl1, ql1 = 1200.000, 1100.000
    ps1, qs1 = 500.000, -100.000
    u = 1.00477465
    assert abs(net.res_load.p_kw.at[l1] - pl1) < 1e-2
    assert abs(net.res_load.q_kvar.at[l1] - ql1) < 1e-2
    # PowerFactory uses the generator sign system, hence the sign flip
    assert abs(net.res_sgen.p_kw.at[sg1] + ps1) < 1e-2
    assert abs(net.res_sgen.q_kvar.at[sg1] + qs1) < 1e-2
    assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-8
def test_load_sgen_split(result_test_network):
    """Splitting the load/sgen over several elements must leave the bus
    voltage unchanged (same reference value as test_load_sgen)."""
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_load_sgen_split"]
    expected_u = 1.00477465
    assert abs(net.res_bus.vm_pu.at[buses.index[1]] - expected_u) < 1e-8
def test_trafo(result_test_network):
    """Compare 2-winding transformer results against PowerFactory
    (t-equivalent model) and Sincal (pi-equivalent model) reference
    values, with both current- and power-based loading.

    Each runpp call below re-solves the SAME net with different options,
    so the assertion groups depend on the preceding solver run.
    """
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_trafo"]
    trafos = [x for x in net.trafo.index if net.trafo.hv_bus[x] in buses.index]
    t1 = trafos[0]
    t2 = trafos[1]   # trafo behind an open switch (zero flow expected)
    t3 = trafos[2]   # out-of-service trafo (zero results expected)
    b2 = buses.index[1]
    b3 = buses.index[2]
    # powerfactory results (to check t-equivalent circuit model)
    runpp_with_consistency_checks(net, trafo_model="t", trafo_loading="current")
    load1 = 56.7348
    load2 = 5.0478
    ph1 = 222.4211
    ph2 = 20.3943
    qh1 = 55.4248
    qh2 = 0.0362
    pl1 = -199.9981
    pl2 = 0
    ql1 = -49.9957
    ql2 = 0
    ih1 = 0.006551
    ih2 = 0.000583
    il1 = 0.299500
    il2 = 0
    v2 = 1.01006174
    v3 = 0.99350859
    assert abs(net.res_trafo.loading_percent.at[t1] - load1) < 1e-1
    assert abs(net.res_trafo.p_hv_kw.at[t1] - ph1) < 1e-1
    assert abs(net.res_trafo.q_hv_kvar.at[t1] - qh1) < 1e-1
    assert abs(net.res_trafo.p_lv_kw.at[t1] - pl1) < 1e-1
    assert abs(net.res_trafo.q_lv_kvar.at[t1] - ql1) < 1e-1
    assert abs(net.res_trafo.i_hv_ka.at[t1] - ih1) < 1e-1
    assert abs(net.res_trafo.i_lv_ka.at[t1] - il1) < 1e-1
    assert abs(net.res_trafo.loading_percent.at[t2] - load2) < 1e-1
    assert abs(net.res_trafo.p_hv_kw.at[t2] - ph2) < 1e-1
    assert abs(net.res_trafo.q_hv_kvar.at[t2] - qh2) < 1e-1
    assert abs(net.res_trafo.p_lv_kw.at[t2] - pl2) < 1e-1
    assert abs(net.res_trafo.q_lv_kvar.at[t2] - ql2) < 1e-1
    assert abs(net.res_trafo.i_hv_ka.at[t2] - ih2) < 1e-1
    assert abs(net.res_trafo.i_lv_ka.at[t2] - il2) < 1e-1
    assert abs(net.res_trafo.loading_percent.at[t3] - 0) < 1e-1
    assert abs(net.res_trafo.p_hv_kw.at[t3] - 0) < 1e-1
    assert abs(net.res_trafo.q_hv_kvar.at[t3] - 0) < 1e-1
    assert abs(net.res_trafo.p_lv_kw.at[t3] - 0) < 1e-1
    assert abs(net.res_trafo.q_lv_kvar.at[t3] - 0) < 1e-1
    assert abs(net.res_trafo.i_hv_ka.at[t3] - 0) < 1e-1
    assert abs(net.res_trafo.i_lv_ka.at[t3] - 0) < 1e-1
    assert abs(net.res_bus.vm_pu.at[b2] - v2) < 1e-6
    assert abs(net.res_bus.vm_pu.at[b3] - v3) < 1e-6
    # # sincal results (to check pi-equivalent circuit model)
    runpp_with_consistency_checks(net, trafo_model="pi", trafo_loading="current")
    load1 = 56.76
    load2 = 5.049
    v2 = 1.010061887962
    v3 = 0.9935012394385
    assert abs(net.res_trafo.loading_percent.at[t1] - load1) < 1e-1
    assert abs(net.res_trafo.loading_percent.at[t2] - load2) < 1e-1
    assert abs(net.res_bus.vm_pu.at[b2] - v2) < 1e-6
    assert abs(net.res_bus.vm_pu.at[b3] - v3) < 1e-6
    # power-based loading with the pi model
    runpp_with_consistency_checks(net, trafo_model="pi", trafo_loading="power")
    load1 = 57.307
    load2 = 5.10
    assert abs(net.res_trafo.loading_percent.at[t1] - load1) < 1e-1
    assert abs(net.res_trafo.loading_percent.at[t2] - load2) < 1e-1
def test_trafo_tap(result_test_network):
    """Check bus voltages of the tapped transformer against PowerFactory
    reference values.

    Fix: the original asserts compared the raw (signed) difference to the
    tolerance without ``abs()``, so any result LARGER than the reference
    produced a negative difference and passed vacuously.
    """
    net = result_test_network
    runpp_with_consistency_checks(net, trafo_model="t", trafo_loading="current")
    buses = net.bus[net.bus.zone == "test_trafo_tap"]
    b2 = buses.index[1]
    b3 = buses.index[2]
    assert abs(net.res_bus.vm_pu.at[b2] - 1.010114175) < 1e-6
    assert abs(net.res_bus.vm_pu.at[b3] - 0.924072090) < 1e-6
#def test_shunt(net):
# b1, b2, ln = add_grid_connection(net)
# pz = 1200
# qz = 1100
# # one shunt at a bus
# pp.create_shunt(net, b2, p_kw=pz, q_kvar=qz)
# runpp_with_consistency_checks(net)
#
# # u = 0.99061732759039389
# # assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
#
# # adding an out of service shunt should not change the result
# pp.create_shunt(net, b2, p_kw=pz, q_kvar=qz, in_service=False)
# runpp_with_consistency_checks(net)
#
# # assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
#
# # splitting up the shunts should not change results
# b1, b2, ln = add_grid_connection(net)
# pp.create_shunt(net, b2, p_kw=pz/2, q_kvar=qz/2)
# pp.create_shunt(net, b2, p_kw=pz/2, q_kvar=qz/2)
# runpp_with_consistency_checks(net)
# # assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
#
def test_ext_grid(result_test_network):
    """Check external grid injections against PowerFactory reference
    values.

    Fix: the original asserts evaluated ``abs(...)`` without ANY
    tolerance comparison, so they were truthy for every non-zero
    difference and never verified the result.  A tolerance of 1e-1 (as
    used for powers in the sibling tests) is now applied.
    """
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_ext_grid"]
    ext_grids = [
        x for x in net.ext_grid.index if net.ext_grid.bus[x] in buses.index]
    eg1 = ext_grids[0]
    eg2 = ext_grids[1]
    # results from powerfactory (generator sign system, hence -p / -q)
    p1 = -1273.6434
    q1 = -2145.0519
    p2 = 1286.2537
    q2 = 1690.1253
    assert abs(net.res_ext_grid.p_kw.at[eg1] - (-p1)) < 1e-1
    assert abs(net.res_ext_grid.q_kvar.at[eg1] - (-q1)) < 1e-1
    assert abs(net.res_ext_grid.p_kw.at[eg2] - (-p2)) < 1e-1
    assert abs(net.res_ext_grid.q_kvar.at[eg2] - (-q2)) < 1e-1
def test_ward(result_test_network):
    """Check ward element results against PowerFactory reference
    values."""
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_ward"]
    wards = [x for x in net.ward.index if net.ward.bus[x] in buses.index]
    b2 = buses.index[1]
    w1 = wards[0]
    # PowerFactory reference values (generator sign system)
    pw = -1704.6146
    qw = -1304.2294
    u = 1.00192121
    assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
    assert abs(net.res_ward.p_kw.loc[w1] + pw) < 1e-1
    assert abs(net.res_ward.q_kvar.loc[w1] + qw) < 1e-1
def test_ward_split(result_test_network):
    """Splitting one ward into two equivalent wards must not change the
    power flow result (same reference values as test_ward).

    Fix: the original asserts computed ``abs(...)`` without a tolerance
    comparison, so they were truthy for any non-zero difference and never
    failed.  Tolerances matching test_ward are now applied.
    """
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_ward_split"]
    wards = [x for x in net.ward.index if net.ward.bus[x] in buses.index]
    b2 = buses.index[1]
    w1 = wards[0]
    w2 = wards[1]
    # powerfactory results of the unsplit ward
    pw = -1704.6146
    qw = -1304.2294
    u = 1.00192121
    assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-6
    assert abs(net.res_ward.p_kw.loc[[w1, w2]].sum() - (-pw)) < 1e-1
    assert abs(net.res_ward.q_kvar.loc[[w1, w2]].sum() - (-qw)) < 1e-1
#
def test_xward(result_test_network):
    """Check extended-ward results against PowerFactory; the second,
    out-of-service xward must not influence the sums."""
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_xward"]
    xwards = [x for x in net.xward.index if net.xward.bus[x] in buses.index]
    b2 = buses.index[1]
    xw1 = xwards[0]
    xw2 = xwards[1]  # out of service xward
    # PowerFactory result for a single in-service xward
    u = 1.00308684
    pxw = -1721.0343
    qxw = -975.9919
    assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-2
    assert abs(net.res_xward.p_kw.at[xw1] + pxw) < 1e-2
    assert abs(net.res_xward.q_kvar.at[xw1] + qxw) < 1e-2
    # including the out-of-service xward must change nothing
    assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-2
    assert abs(net.res_xward.p_kw.loc[[xw1, xw2]].sum() + pxw) < 1e-2
    assert abs(net.res_xward.q_kvar.loc[[xw1, xw2]].sum() + qxw) < 1e-2
def test_xward_combination(result_test_network):
    """Check results with two identical active xwards (and one inactive
    in between) against PowerFactory reference values."""
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_xward_combination"]
    xwards = [x for x in net.xward.index if net.xward.bus[x] in buses.index]
    b2 = buses.index[1]
    # PowerFactory result for the two active xwards (identical units,
    # so they share one reference value pair)
    u = 0.99568034
    pxw = -1707.1063
    qxw = -918.7192
    assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-2
    for xw in (xwards[0], xwards[2]):
        assert abs(net.res_xward.p_kw.at[xw] + pxw) < 1e-1
        assert abs(net.res_xward.q_kvar.at[xw] + qxw) < 1e-1
def test_gen(result_test_network):
    """Check generator results: the controlled bus is held at its voltage
    setpoint and the reactive output matches PowerFactory."""
    net = result_test_network
    buses = net.bus[net.bus.zone == "test_gen"]
    gens = [x for x in net.gen.index if net.gen.bus[x] in buses.index]
    b2 = buses.index[1]
    b3 = buses.index[2]
    g1 = gens[0]
    # PowerFactory reference values (generator sign system)
    q = -260.660
    u2 = 1.00584636
    u_set = 1.0
    assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-8
    assert abs(net.res_bus.vm_pu.at[b3] - u_set) < 1e-8
    assert abs(net.res_gen.q_kvar.at[g1] + q) < 1e-1
def test_enforce_qlims(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_enforce_qlims"]
gens = [x for x in net.gen.index if net.gen.bus[x] in buses.index]
b2 = buses.index[1]
b3 = buses.index[2]
g1 = gens[0]
# enforce reactive power limits
runpp_with_consistency_checks(net, enforce_q_lims=True)
# powerfactory results
u2 = 1.00607194
u3 = 1.00045091
assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-2
assert abs(net.res_bus.vm_pu.at[b3] - u3) < 1e-2
assert abs(net.res_gen.q_kvar.at[g1] - net.gen.max_q_kvar.at[g1]) < 1e-2
#
#
def test_trafo3w(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_trafo3w"]
trafos = [x for x in net.trafo3w.index if net.trafo3w.hv_bus[
x] in buses.index]
runpp_with_consistency_checks(net, trafo_model="t")
b2 = buses.index[1]
b3 = buses.index[2]
b4 = buses.index[3]
t3 = trafos[0]
uhv = 1.00895246
umv = 1.00440765
ulv = 1.00669961
load = 68.261
qhv = 154.60
qmv = -100.00
qlv = -50.00
phv = 551.43
pmv = -300.00
plv = -200.00
assert abs((net.res_bus.vm_pu.at[b2] - uhv)) < 1e-4
assert abs((net.res_bus.vm_pu.at[b3] - umv)) < 1e-4
assert abs((net.res_bus.vm_pu.at[b4] - ulv)) < 1e-4
assert abs((net.res_trafo3w.loading_percent.at[t3] - load)) < 1e-2
assert abs((net.res_trafo3w.p_hv_kw.at[t3] - phv)) < 1
assert abs((net.res_trafo3w.p_mv_kw.at[t3] - pmv)) < 1
assert abs((net.res_trafo3w.p_lv_kw.at[t3] - plv)) < 1
assert abs((net.res_trafo3w.q_hv_kvar.at[t3] - qhv)) < 1
assert abs((net.res_trafo3w.q_mv_kvar.at[t3] - qmv)) < 1
assert abs((net.res_trafo3w.q_lv_kvar.at[t3] - qlv)) < 1
# power transformer loading
runpp_with_consistency_checks(net, trafo_model="t", trafo_loading="power")
load_p = 68.718
assert abs((net.res_trafo3w.loading_percent.at[t3] - load_p)) < 1e-2
def test_impedance(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_impedance"]
impedances = [
x for x in net.impedance.index if net.impedance.from_bus[x] in buses.index]
runpp_with_consistency_checks(net, trafo_model="t")
b2 = buses.index[1]
b3 = buses.index[2]
imp1 = impedances[0]
# powerfactory results
ifrom = 0.0325
ito = 0.0030
pfrom = 1012.6480
qfrom = 506.3231
pto = -999.9960
qto = -499.9971
u2 = 1.00654678
u3 = 0.99397101
assert abs(net.res_impedance.p_from_kw.at[imp1] - pfrom) < 1e-1
assert abs(net.res_impedance.p_to_kw.at[imp1] - pto) < 1e-1
assert abs(net.res_impedance.q_from_kvar.at[imp1] - qfrom) < 1e-1
assert abs(net.res_impedance.q_to_kvar.at[imp1] - qto) < 1e-1
assert abs(net.res_impedance.i_from_ka.at[imp1] - ifrom) < 1e-1
assert abs(net.res_impedance.i_to_ka.at[imp1] - ito) < 1e-1
assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-6
assert abs(net.res_bus.vm_pu.at[b3] - u3) < 1e-6
def test_bus_bus_switch(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_bus_bus_switch"]
b2 = buses.index[1]
b3 = buses.index[2]
# powerfactory voltage
u = 0.982265380
assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-5
assert abs(net.res_bus.vm_pu.at[b3] - u) < 1e-5
assert abs(net.res_bus.vm_pu.at[b2] == net.res_bus.vm_pu.at[b2])
def test_enforce_q_lims():
""" Test for enforce_q_lims loadflow option
"""
# net = pp.test.create_test_network()
# net.gen.max_q_kvar = 1000
# net.gen.min_q_kvar = -1000
# pp.runpp(net, enforce_q_lims=True)
net = pp.create_empty_network()
# test_gen
net = add_test_gen(net)
pp.runpp(net)
buses = net.bus[net.bus.zone == "test_gen"]
gens = [x for x in net.gen.index if net.gen.bus[x] in buses.index]
# b1=buses.index[0]
b2 = buses.index[1]
b3 = buses.index[2]
g1 = gens[0]
q = -260.660
u2 = 1.00584636
u_set = 1.0
assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-8
assert abs(net.res_bus.vm_pu.at[b3] - u_set) < 1e-8
assert abs(net.res_gen.q_kvar.at[g1] - (-q)) < 1e-1
# test_enforce_qlims
net = add_test_enforce_qlims(net)
pp.runpp(net, enforce_q_lims=True)
buses = net.bus[net.bus.zone == "test_enforce_qlims"]
gens = [x for x in net.gen.index if net.gen.bus[x] in buses.index]
b2 = buses.index[1]
b3 = buses.index[2]
g1 = gens[0]
u2 = 1.00607194
u3 = 1.00045091
assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-2
assert abs(net.res_bus.vm_pu.at[b3] - u3) < 1e-2
assert abs(net.res_gen.q_kvar.at[g1] - net.gen.max_q_kvar.at[g1]) < 1e-2
def test_shunt(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_shunt"]
b2 = buses.index[1]
shunts = [x for x in net.shunt.index if net.shunt.bus[x] in buses.index]
s1 = shunts[0]
u = 1.015007
p = 123.628741
q = -1236.287413
assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
assert abs(net.res_shunt.p_kw.loc[s1] - p) < 1e-6
assert abs(net.res_shunt.q_kvar.loc[s1] - q) < 1e-6
def test_shunt_split(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_shunt_split"]
b2 = buses.index[1]
shunts = [x for x in net.shunt.index if net.shunt.bus[x] in buses.index]
s1 = shunts[0]
u = 1.015007
p = 123.628741
q = -1236.287413
assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
assert abs(net.res_shunt.p_kw.loc[s1] - p/2) < 1e-6
assert abs(net.res_shunt.q_kvar.loc[s1] - q/2) < 1e-6
# def test_trafo3w_tap(net):
# TODO
if __name__ == "__main__":
pytest.main(["test_results.py", "-s"])
| # -*- coding: utf-8 -*-
# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy
# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import pandapower as pp
import pytest
from pandapower.test.consistency_checks import runpp_with_consistency_checks
from pandapower.test.result_test_network_generator import add_test_enforce_qlims, add_test_gen
def test_line(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_line"]
lines = [x for x in net.line.index if net.line.from_bus[x] in buses.index]
l1 = lines[0]
l2 = lines[1]
l3 = lines[2]
b2 = buses.index[1]
# result values from powerfactory
load1 = 14.578
load2 = 8.385
ika1 = 0.04665
ika2 = 0.0134
p_from1 = 1202.21
p_from2 = 0.132
q_from1 = 167.390
q_from2 = -469.371
p_to1 = -1200.000
p_to2 = 0.000
q_to1 = -1100.000
q_to2 = 0.0000
v = 1.007395422
# line 1
assert abs(net.res_line.loading_percent.at[l1] - load1) < 1e-2
assert abs(net.res_line.i_ka.at[l1] - ika1) < 1e-2
assert abs(net.res_line.p_from_kw.at[l1] - p_from1) < 1e-2
assert abs(net.res_line.q_from_kvar.at[l1] - q_from1) < 1e-2
assert abs(net.res_line.p_to_kw.at[l1] - p_to1) < 1e-2
assert abs(net.res_line.q_to_kvar.at[l1] - q_to1) < 1e-2
# line2 (open switch line)
assert abs(net.res_line.loading_percent.at[l2] - load2) < 1e-2
assert abs(net.res_line.i_ka.at[l2] - ika2) < 1e-2
assert abs(net.res_line.p_from_kw.at[l2] - p_from2) < 1e-2
assert abs(net.res_line.q_from_kvar.at[l2] - q_from2) < 1e-2
assert abs(net.res_line.p_to_kw.at[l2] - p_to2) < 1e-2
assert abs(net.res_line.q_to_kvar.at[l2] - q_to2) < 1e-2
assert abs(net.res_bus.vm_pu.at[b2] - v) < 1e-8
# line3 (of out of service line)
assert abs(net.res_line.loading_percent.at[l3] - 0) < 1e-2
assert abs(net.res_line.i_ka.at[l3] - 0) < 1e-2
assert abs(net.res_line.p_from_kw.at[l3] - 0) < 1e-2
assert abs(net.res_line.q_from_kvar.at[l3] - 0) < 1e-2
assert abs(net.res_line.p_to_kw.at[l3] - 0) < 1e-2
assert abs(net.res_line.q_to_kvar.at[l3] - 0) < 1e-2
def test_load_sgen(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_load_sgen"]
loads = [x for x in net.load.index if net.load.bus[x] in buses.index]
sgens = [x for x in net.sgen.index if net.sgen.bus[x] in buses.index]
l1 = loads[0]
sg1 = sgens[0]
b2 = buses.index[1]
# result values from powerfactory
pl1 = 1200.000
ql1 = 1100.000
qs1 = -100.000
ps1 = 500.000
u = 1.00477465
assert abs(net.res_load.p_kw.at[l1] - pl1) < 1e-2
assert abs(net.res_load.q_kvar.at[l1] - ql1) < 1e-2
# pf uses generator system
assert abs(net.res_sgen.p_kw.at[sg1] - (- ps1)) < 1e-2
# pf uses generator system
assert abs(net.res_sgen.q_kvar.at[sg1] - (-qs1)) < 1e-2
assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-8
def test_load_sgen_split(result_test_network):
# splitting up the load/sgen should not change the result
net = result_test_network
buses = net.bus[net.bus.zone == "test_load_sgen_split"]
b2 = buses.index[1]
u = 1.00477465
assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-8
def test_trafo(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_trafo"]
trafos = [x for x in net.trafo.index if net.trafo.hv_bus[x] in buses.index]
t1 = trafos[0]
t2 = trafos[1]
t3 = trafos[2]
b2 = buses.index[1]
b3 = buses.index[2]
# powerfactory results (to check t-equivalent circuit model)
runpp_with_consistency_checks(net, trafo_model="t", trafo_loading="current")
load1 = 56.7348
load2 = 5.0478
ph1 = 222.4211
ph2 = 20.3943
qh1 = 55.4248
qh2 = 0.0362
pl1 = -199.9981
pl2 = 0
ql1 = -49.9957
ql2 = 0
ih1 = 0.006551
ih2 = 0.000583
il1 = 0.299500
il2 = 0
v2 = 1.01006174
v3 = 0.99350859
assert abs(net.res_trafo.loading_percent.at[t1] - load1) < 1e-1
assert abs(net.res_trafo.p_hv_kw.at[t1] - ph1) < 1e-1
assert abs(net.res_trafo.q_hv_kvar.at[t1] - qh1) < 1e-1
assert abs(net.res_trafo.p_lv_kw.at[t1] - pl1) < 1e-1
assert abs(net.res_trafo.q_lv_kvar.at[t1] - ql1) < 1e-1
assert abs(net.res_trafo.i_hv_ka.at[t1] - ih1) < 1e-1
assert abs(net.res_trafo.i_lv_ka.at[t1] - il1) < 1e-1
assert abs(net.res_trafo.loading_percent.at[t2] - load2) < 1e-1
assert abs(net.res_trafo.p_hv_kw.at[t2] - ph2) < 1e-1
assert abs(net.res_trafo.q_hv_kvar.at[t2] - qh2) < 1e-1
assert abs(net.res_trafo.p_lv_kw.at[t2] - pl2) < 1e-1
assert abs(net.res_trafo.q_lv_kvar.at[t2] - ql2) < 1e-1
assert abs(net.res_trafo.i_hv_ka.at[t2] - ih2) < 1e-1
assert abs(net.res_trafo.i_lv_ka.at[t2] - il2) < 1e-1
assert abs(net.res_trafo.loading_percent.at[t3] - 0) < 1e-1
assert abs(net.res_trafo.p_hv_kw.at[t3] - 0) < 1e-1
assert abs(net.res_trafo.q_hv_kvar.at[t3] - 0) < 1e-1
assert abs(net.res_trafo.p_lv_kw.at[t3] - 0) < 1e-1
assert abs(net.res_trafo.q_lv_kvar.at[t3] - 0) < 1e-1
assert abs(net.res_trafo.i_hv_ka.at[t3] - 0) < 1e-1
assert abs(net.res_trafo.i_lv_ka.at[t3] - 0) < 1e-1
assert abs(net.res_bus.vm_pu.at[b2] - v2) < 1e-6
assert abs(net.res_bus.vm_pu.at[b3] - v3) < 1e-6
# # sincal results (to check pi-equivalent circuit model)
runpp_with_consistency_checks(net, trafo_model="pi", trafo_loading="current")
load1 = 56.76
load2 = 5.049
v2 = 1.010061887962
v3 = 0.9935012394385
assert abs(net.res_trafo.loading_percent.at[t1] - load1) < 1e-1
assert abs(net.res_trafo.loading_percent.at[t2] - load2) < 1e-1
assert abs(net.res_bus.vm_pu.at[b2] - v2) < 1e-6
assert abs(net.res_bus.vm_pu.at[b3] - v3) < 1e-6
runpp_with_consistency_checks(net, trafo_model="pi", trafo_loading="power")
load1 = 57.307
load2 = 5.10
assert abs(net.res_trafo.loading_percent.at[t1] - load1) < 1e-1
assert abs(net.res_trafo.loading_percent.at[t2] - load2) < 1e-1
def test_trafo_tap(result_test_network):
net = result_test_network
runpp_with_consistency_checks(net, trafo_model="t", trafo_loading="current")
buses = net.bus[net.bus.zone == "test_trafo_tap"]
b2 = buses.index[1]
b3 = buses.index[2]
assert (1.010114175 - net.res_bus.vm_pu.at[b2]) < 1e-6
assert (0.924072090 - net.res_bus.vm_pu.at[b3]) < 1e-6
#def test_shunt(net):
# b1, b2, ln = add_grid_connection(net)
# pz = 1200
# qz = 1100
# # one shunt at a bus
# pp.create_shunt(net, b2, p_kw=pz, q_kvar=qz)
# runpp_with_consistency_checks(net)
#
# # u = 0.99061732759039389
# # assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
#
# # add out of service shunt shuold not change the result
# pp.create_shunt(net, b2, p_kw=pz, q_kvar=qz, in_service=False)
# runpp_with_consistency_checks(net)
#
# # assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
#
# # splitting up the shunts should not change results
# b1, b2, ln = add_grid_connection(net)
# pp.create_shunt(net, b2, p_kw=pz/2, q_kvar=qz/2)
# pp.create_shunt(net, b2, p_kw=pz/2, q_kvar=qz/2)
# runpp_with_consistency_checks(net)
# # assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
#
def test_ext_grid(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_ext_grid"]
ext_grids = [
x for x in net.ext_grid.index if net.ext_grid.bus[x] in buses.index]
eg1 = ext_grids[0]
eg2 = ext_grids[1]
# results from powerfactory
p1 = -1273.6434
q1 = -2145.0519
p2 = 1286.2537
q2 = 1690.1253
assert abs(net.res_ext_grid.p_kw.at[eg1] - (-p1))
assert abs(net.res_ext_grid.q_kvar.at[eg1] - (-q1))
assert abs(net.res_ext_grid.p_kw.at[eg2] - (-p2))
assert abs(net.res_ext_grid.q_kvar.at[eg2] - (-q2))
def test_ward(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_ward"]
wards = [x for x in net.ward.index if net.ward.bus[x] in buses.index]
b2 = buses.index[1]
w1 = wards[0]
# powerfactory results
pw = -1704.6146
qw = -1304.2294
u = 1.00192121
assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
assert abs(net.res_ward.p_kw.loc[w1] - (-pw)) < 1e-1
assert abs(net.res_ward.q_kvar.loc[w1] - (-qw)) < 1e-1
def test_ward_split(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_ward_split"]
wards = [x for x in net.ward.index if net.ward.bus[x] in buses.index]
b2 = buses.index[1]
w1 = wards[0]
w2 = wards[1]
# powerfactory results
pw = -1704.6146
qw = -1304.2294
u = 1.00192121
assert abs(net.res_bus.vm_pu.at[b2] - u)
assert abs(net.res_ward.p_kw.loc[[w1, w2]].sum() - (-pw))
assert abs(net.res_ward.q_kvar.loc[[w1, w2]].sum() - (-qw))
#
def test_xward(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_xward"]
xwards = [x for x in net.xward.index if net.xward.bus[x] in buses.index]
b2 = buses.index[1]
xw1 = xwards[0]
xw2 = xwards[1] # Out of servic xward
# powerfactory result for 1 xward
u = 1.00308684
pxw = -1721.0343
qxw = -975.9919
#
assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-2
assert abs(net.res_xward.p_kw.at[xw1] - (-pxw)) < 1e-2
assert abs(net.res_xward.q_kvar.at[xw1] - (-qxw)) < 1e-2
assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-2
assert abs(net.res_xward.p_kw.loc[[xw1, xw2]].sum() - (-pxw)) < 1e-2
assert abs(net.res_xward.q_kvar.loc[[xw1, xw2]].sum() - (-qxw)) < 1e-2
def test_xward_combination(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_xward_combination"]
xwards = [x for x in net.xward.index if net.xward.bus[x] in buses.index]
b2 = buses.index[1]
xw1 = xwards[0]
xw3 = xwards[2]
# powerfactory result for 2 active xwards
u = 0.99568034
pxw1 = -1707.1063
pxw3 = -1707.1063
qxw1 = -918.7192
qxw3 = -918.7192
assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-2
assert abs(net.res_xward.p_kw.at[xw1] - (-pxw1)) < 1e-1
assert abs(net.res_xward.q_kvar.at[xw1] - (-qxw1)) < 1e-1
assert abs(net.res_xward.p_kw.at[xw3] - (-pxw3)) < 1e-1
assert abs(net.res_xward.q_kvar.at[xw3] - (-qxw3)) < 1e-1
def test_gen(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_gen"]
gens = [x for x in net.gen.index if net.gen.bus[x] in buses.index]
b2 = buses.index[1]
b3 = buses.index[2]
g1 = gens[0]
# powerfactory results
q = -260.660
u2 = 1.00584636
u_set = 1.0
assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-8
assert abs(net.res_bus.vm_pu.at[b3] - u_set) < 1e-8
assert abs(net.res_gen.q_kvar.at[g1] - (-q)) < 1e-1
def test_enforce_qlims(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_enforce_qlims"]
gens = [x for x in net.gen.index if net.gen.bus[x] in buses.index]
b2 = buses.index[1]
b3 = buses.index[2]
g1 = gens[0]
# enforce reactive power limits
runpp_with_consistency_checks(net, enforce_q_lims=True)
# powerfactory results
u2 = 1.00607194
u3 = 1.00045091
assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-2
assert abs(net.res_bus.vm_pu.at[b3] - u3) < 1e-2
assert abs(net.res_gen.q_kvar.at[g1] - net.gen.max_q_kvar.at[g1]) < 1e-2
#
#
def test_trafo3w(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_trafo3w"]
trafos = [x for x in net.trafo3w.index if net.trafo3w.hv_bus[
x] in buses.index]
runpp_with_consistency_checks(net, trafo_model="t")
b2 = buses.index[1]
b3 = buses.index[2]
b4 = buses.index[3]
t3 = trafos[0]
uhv = 1.00895246
umv = 1.00440765
ulv = 1.00669961
load = 68.261
qhv = 154.60
qmv = -100.00
qlv = -50.00
phv = 551.43
pmv = -300.00
plv = -200.00
assert abs((net.res_bus.vm_pu.at[b2] - uhv)) < 1e-4
assert abs((net.res_bus.vm_pu.at[b3] - umv)) < 1e-4
assert abs((net.res_bus.vm_pu.at[b4] - ulv)) < 1e-4
assert abs((net.res_trafo3w.loading_percent.at[t3] - load)) < 1e-2
assert abs((net.res_trafo3w.p_hv_kw.at[t3] - phv)) < 1
assert abs((net.res_trafo3w.p_mv_kw.at[t3] - pmv)) < 1
assert abs((net.res_trafo3w.p_lv_kw.at[t3] - plv)) < 1
assert abs((net.res_trafo3w.q_hv_kvar.at[t3] - qhv)) < 1
assert abs((net.res_trafo3w.q_mv_kvar.at[t3] - qmv)) < 1
assert abs((net.res_trafo3w.q_lv_kvar.at[t3] - qlv)) < 1
# power transformer loading
runpp_with_consistency_checks(net, trafo_model="t", trafo_loading="power")
load_p = 68.718
assert abs((net.res_trafo3w.loading_percent.at[t3] - load_p)) < 1e-2
def test_impedance(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_impedance"]
impedances = [
x for x in net.impedance.index if net.impedance.from_bus[x] in buses.index]
runpp_with_consistency_checks(net, trafo_model="t")
b2 = buses.index[1]
b3 = buses.index[2]
imp1 = impedances[0]
# powerfactory results
ifrom = 0.0325
ito = 0.0030
pfrom = 1012.6480
qfrom = 506.3231
pto = -999.9960
qto = -499.9971
u2 = 1.00654678
u3 = 0.99397101
assert abs(net.res_impedance.p_from_kw.at[imp1] - pfrom) < 1e-1
assert abs(net.res_impedance.p_to_kw.at[imp1] - pto) < 1e-1
assert abs(net.res_impedance.q_from_kvar.at[imp1] - qfrom) < 1e-1
assert abs(net.res_impedance.q_to_kvar.at[imp1] - qto) < 1e-1
assert abs(net.res_impedance.i_from_ka.at[imp1] - ifrom) < 1e-1
assert abs(net.res_impedance.i_to_ka.at[imp1] - ito) < 1e-1
assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-6
assert abs(net.res_bus.vm_pu.at[b3] - u3) < 1e-6
def test_bus_bus_switch(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_bus_bus_switch"]
b2 = buses.index[1]
b3 = buses.index[2]
# powerfactory voltage
u = 0.982265380
assert abs(net.res_bus.vm_pu.at[b2] - u) < 1e-5
assert abs(net.res_bus.vm_pu.at[b3] - u) < 1e-5
assert abs(net.res_bus.vm_pu.at[b2] == net.res_bus.vm_pu.at[b2])
def test_enforce_q_lims():
""" Test for enforce_q_lims loadflow option
"""
# net = pp.test.create_test_network()
# net.gen.max_q_kvar = 1000
# net.gen.min_q_kvar = -1000
# pp.runpp(net, enforce_q_lims=True)
net = pp.create_empty_network()
# test_gen
net = add_test_gen(net)
pp.runpp(net)
buses = net.bus[net.bus.zone == "test_gen"]
gens = [x for x in net.gen.index if net.gen.bus[x] in buses.index]
# b1=buses.index[0]
b2 = buses.index[1]
b3 = buses.index[2]
g1 = gens[0]
q = -260.660
u2 = 1.00584636
u_set = 1.0
assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-8
assert abs(net.res_bus.vm_pu.at[b3] - u_set) < 1e-8
assert abs(net.res_gen.q_kvar.at[g1] - (-q)) < 1e-1
# test_enforce_qlims
net = add_test_enforce_qlims(net)
pp.runpp(net, enforce_q_lims=True)
buses = net.bus[net.bus.zone == "test_enforce_qlims"]
gens = [x for x in net.gen.index if net.gen.bus[x] in buses.index]
b2 = buses.index[1]
b3 = buses.index[2]
g1 = gens[0]
u2 = 1.00607194
u3 = 1.00045091
assert abs(net.res_bus.vm_pu.at[b2] - u2) < 1e-2
assert abs(net.res_bus.vm_pu.at[b3] - u3) < 1e-2
assert abs(net.res_gen.q_kvar.at[g1] - net.gen.max_q_kvar.at[g1]) < 1e-2
def test_shunt(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_shunt"]
b2 = buses.index[1]
shunts = [x for x in net.shunt.index if net.shunt.bus[x] in buses.index]
s1 = shunts[0]
u = 1.015007
p = 123.628741
q = -1236.287413
assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
assert abs(net.res_shunt.p_kw.loc[s1] - p) < 1e-6
assert abs(net.res_shunt.q_kvar.loc[s1] - q) < 1e-6
def test_shunt_split(result_test_network):
net = result_test_network
buses = net.bus[net.bus.zone == "test_shunt_split"]
b2 = buses.index[1]
shunts = [x for x in net.shunt.index if net.shunt.bus[x] in buses.index]
s1 = shunts[0]
u = 1.015007
p = 123.628741
q = -1236.287413
assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6
assert abs(net.res_shunt.p_kw.loc[s1] - p/2) < 1e-6
assert abs(net.res_shunt.q_kvar.loc[s1] - q/2) < 1e-6
# def test_trafo3w_tap(net):
# TODO
if __name__ == "__main__":
pytest.main(["test_results.py", "-s"])
| en | 0.588127 | # -*- coding: utf-8 -*- # Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy # System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a # BSD-style license that can be found in the LICENSE file. # result values from powerfactory # line 1 # line2 (open switch line) # line3 (of out of service line) # result values from powerfactory # pf uses generator system # pf uses generator system # splitting up the load/sgen should not change the result # powerfactory results (to check t-equivalent circuit model) # # sincal results (to check pi-equivalent circuit model) #def test_shunt(net): # b1, b2, ln = add_grid_connection(net) # pz = 1200 # qz = 1100 # # one shunt at a bus # pp.create_shunt(net, b2, p_kw=pz, q_kvar=qz) # runpp_with_consistency_checks(net) # # # u = 0.99061732759039389 # # assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6 # # # add out of service shunt shuold not change the result # pp.create_shunt(net, b2, p_kw=pz, q_kvar=qz, in_service=False) # runpp_with_consistency_checks(net) # # # assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6 # # # splitting up the shunts should not change results # b1, b2, ln = add_grid_connection(net) # pp.create_shunt(net, b2, p_kw=pz/2, q_kvar=qz/2) # pp.create_shunt(net, b2, p_kw=pz/2, q_kvar=qz/2) # runpp_with_consistency_checks(net) # # assert abs(net.res_bus.vm_pu.loc[b2] - u) < 1e-6 # # results from powerfactory # powerfactory results # powerfactory results # # Out of servic xward # powerfactory result for 1 xward # # powerfactory result for 2 active xwards # powerfactory results # enforce reactive power limits # powerfactory results # # # power transformer loading # powerfactory results # powerfactory voltage Test for enforce_q_lims loadflow option # net = pp.test.create_test_network() # net.gen.max_q_kvar = 1000 # net.gen.min_q_kvar = -1000 # pp.runpp(net, enforce_q_lims=True) # test_gen # b1=buses.index[0] # 
test_enforce_qlims # def test_trafo3w_tap(net): # TODO | 2.272196 | 2 |
cogs/fun.py | LyeZinho/lyezinha | 2 | 6620027 | <filename>cogs/fun.py<gh_stars>1-10
import os
import json
from discord.ext import commands
from discord import Embed
import requests
# This is an example cog that shows how commands can be added.
class Fun(commands.Cog):
    """Fun commands: anime pictures, reaction GIFs, joke facts and
    imgflip meme captioning.

    NOTE(review): every HTTP call in this cog uses the blocking
    ``requests`` library inside a coroutine, which stalls the bot's
    event loop while the request is in flight; consider ``aiohttp``.
    """

    # imgflip template ids used by the caption commands below.
    _TEMPLATE_CHANGEMYMIND = "129242436"
    _TEMPLATE_SMARTTHINK = "89370399"
    _TEMPLATE_EXPLAING = "101470"
    _TEMPLATE_HOLDINGBOARD = "216951317"
    _TEMPLATE_NOTTHESAME = "342785297"

    def __init__(self, bot):
        # Keep a reference to the bot instance for this cog.
        self.bot = bot

    # ---- internal helpers -------------------------------------------------

    @staticmethod
    def _waifu_pics_url(category):
        """Return the image URL for one https://api.waifu.pics/sfw/ category."""
        response = requests.get('https://api.waifu.pics/sfw/{0}'.format(category))
        return response.json()["url"]

    @staticmethod
    def _imgflip_caption(template_id, text0, text1=None):
        """Caption an imgflip template and return the generated image URL.

        Raises KeyError if the imgflip credentials are missing from the
        environment or the API answer has an unexpected shape.
        """
        params = {
            'username': os.environ['IMFLIPUSER'],
            # NOTE(review): the password env-var name was scrubbed from the
            # original source -- confirm the real variable name.
            'password': os.environ['IMFLIPPASSWORD'],
            'template_id': template_id,
            'text0': text0,
        }
        if text1 is not None:
            params['text1'] = text1
        response = requests.request('POST',
                                    'https://api.imgflip.com/caption_image',
                                    params=params).json()
        return response["data"]["url"]

    @staticmethod
    def _wrong_syntax_embed(command_name):
        """Build the help embed shown when a caption command gets no text."""
        embed = Embed(title="-🤔 Wrong Sintax 🤔-")
        # Bug fix: the original always advertised "!changemymind" here,
        # even for the other caption commands.
        embed.add_field(name="Correct command sintax",
                        value="command !{0} <text>".format(command_name),
                        inline=True)
        return embed

    # ---- picture commands -------------------------------------------------

    # Pics neko from api.waifu.pics
    @commands.command(name='picneko')
    async def pic_neko(self, ctx):
        """Post a random SFW neko picture."""
        image_url = self._waifu_pics_url('neko')
        embed = Embed(
            title="-🐱Nekos🐱-"
        )
        await ctx.reply(embed=embed, mention_author=False)
        await ctx.send("{0}".format(image_url))

    # Pics waifu from api.waifu.pics
    @commands.command(name='picwaifu')
    async def pic_waifu(self, ctx):
        """Post a random SFW waifu picture."""
        image_url = self._waifu_pics_url('waifu')
        embed = Embed(
            title="-🌸Waifu🌸-"
        )
        await ctx.reply(embed=embed, mention_author=False)
        await ctx.send("{0}".format(image_url))

    @commands.command(name='dance')
    async def dance(self, ctx):
        """Post a random dance GIF."""
        await ctx.reply("{0}".format(self._waifu_pics_url('dance')))

    @commands.command(name='cringe')
    async def cringe(self, ctx):
        """Post a random cringe GIF."""
        await ctx.reply("{0}".format(self._waifu_pics_url('cringe')))

    @commands.command(name='slap')
    async def slap(self, ctx, arg):
        """Post a slap GIF and announce that the author slapped ``arg``."""
        await ctx.reply("{0}".format(self._waifu_pics_url('slap')))
        await ctx.send("{0} Deu um tapa em {1}".format(ctx.author, arg))

    @commands.command(name='smile')
    async def smile(self, ctx):
        """Post a random smile GIF."""
        await ctx.reply("{0}".format(self._waifu_pics_url('smile')))

    @commands.command(name='bonk')
    async def bonk(self, ctx, arg):
        """Post a bonk GIF aimed at ``arg``."""
        await ctx.reply("{0}".format(self._waifu_pics_url('bonk')))
        await ctx.send("{0} Get BONK ".format(arg))

    @commands.command(name='kiss')
    async def kiss(self, ctx, arg):
        """Post a kiss GIF and announce that the author kissed ``arg``."""
        await ctx.reply("{0}".format(self._waifu_pics_url('kiss')))
        await ctx.send("{0} Deu um beijo em {1}".format(ctx.author, arg))

    @commands.command(name='kill')
    async def kill(self, ctx):
        """Post a random kill GIF."""
        await ctx.reply("{0}".format(self._waifu_pics_url('kill')))

    # Yomama facts from yomomma-api
    @commands.command(name='yomama')
    async def yomama(self, ctx):
        """Post three random "yo mama" jokes in one embed."""
        jokes = requests.get(
            'https://yomomma-api.herokuapp.com/jokes?count=4').json()
        embed = Embed(
            title="-😎YoMama😎-"
        )
        for index in range(3):
            embed.add_field(name="Fact {0}".format(index + 1),
                            value="**{0}**\n".format(jokes[index]["joke"]),
                            inline=True)
        await ctx.reply(embed=embed, mention_author=False)

    # ---- imgflip caption commands -----------------------------------------

    @commands.command(name='changemymind')
    async def changemymind(self, ctx, *, arg1=None):
        """Caption the "change my mind" meme template with ``arg1``.

        ``arg1`` defaults to None so the wrong-syntax help can actually be
        shown (the original declared it required, which made the ``else``
        branch unreachable).
        """
        if arg1 is not None:
            await ctx.send("{0}".format(
                self._imgflip_caption(self._TEMPLATE_CHANGEMYMIND, arg1)))
        else:
            # Bug fix: the original passed the embed as a bogus
            # ``replyembed=`` keyword, which raises TypeError.
            await ctx.send(embed=self._wrong_syntax_embed('changemymind'))

    @commands.command(name='smartthink')
    async def smartthink(self, ctx, *, arg1=None):
        """Caption the "smart thinking" meme template with ``arg1``."""
        if arg1 is not None:
            await ctx.send("{0}".format(
                self._imgflip_caption(self._TEMPLATE_SMARTTHINK, arg1)))
        else:
            await ctx.send(embed=self._wrong_syntax_embed('smartthink'))

    @commands.command(name='explaing')
    async def explaing(self, ctx, *, arg1=None):
        """Caption the "explain" meme template with ``arg1``."""
        if arg1 is not None:
            await ctx.send("{0}".format(
                self._imgflip_caption(self._TEMPLATE_EXPLAING, arg1)))
        else:
            await ctx.send(embed=self._wrong_syntax_embed('explaing'))

    @commands.command(name='holdingboard')
    async def holdingboard(self, ctx, *, arg1=None):
        """Caption the "holding a board" meme template with ``arg1``."""
        if arg1 is not None:
            await ctx.send("{0}".format(
                self._imgflip_caption(self._TEMPLATE_HOLDINGBOARD, arg1)))
        else:
            await ctx.send(embed=self._wrong_syntax_embed('holdingboard'))

    @commands.command(name='notthesame')
    async def notthesame(self, ctx, *, arg1=None):
        """Caption the "we are not the same" template with ``arg1``."""
        if arg1 is not None:
            await ctx.send("{0}".format(self._imgflip_caption(
                self._TEMPLATE_NOTTHESAME, arg1,
                text1="Não! Nós não somos iguais!")))
        else:
            await ctx.send(embed=self._wrong_syntax_embed('notthesame'))
def setup(bot):
    """Standard discord.py extension entry point: register the Fun cog."""
    # Bug fix: the original line had dataset residue ("| <filename>...")
    # fused onto it, which is not valid Python.
    bot.add_cog(Fun(bot))
import os
import json
from discord.ext import commands
from discord import Embed
import requests
# This is an example cog that shows how commands can be added.
class Fun(commands.Cog):
"""Fun commands"""
    def __init__(self, bot):
        # Store the bot instance so this cog's commands can use it later.
        self.bot = bot
#Pics neko from api.waifu.pics
@bot.command(name='picneko')
async def pic_neko(ctx):
response = requests.get('https://api.waifu.pics/sfw/neko')
imageResponse = response.json()["url"]
embed = Embed(
title="-🐱Nekos🐱-"
)
await ctx.reply(embed=embed, mention_author=False)
await ctx.send("{0}".format(imageResponse))
#Pics waifu from api.waifu.pics
@bot.command(name='picwaifu')
async def pic_waifu(ctx):
response = requests.get('https://api.waifu.pics/sfw/waifu')
imageResponse = response.json()["url"]
embed = Embed(
title="-🌸Waifu🌸-"
)
await ctx.reply(embed=embed, mention_author=False)
await ctx.send("{0}".format(imageResponse))
@bot.command(name='dance')
async def dance(ctx):
response = requests.get('https://api.waifu.pics/sfw/dance')
imageResponse = response.json()["url"]
await ctx.reply("{0}".format(imageResponse))
@bot.command(name='cringe')
async def cringe(ctx):
response = requests.get('https://api.waifu.pics/sfw/cringe')
imageResponse = response.json()["url"]
await ctx.reply("{0}".format(imageResponse))
@bot.command(name='slap')
async def slap(ctx, arg):
response = requests.get('https://api.waifu.pics/sfw/slap')
imageResponse = response.json()["url"]
await ctx.reply("{0}".format(imageResponse))
await ctx.send("{0} Deu um tapa em {1}".format(ctx.author ,arg))
@bot.command(name='smile')
async def smile(ctx):
response = requests.get('https://api.waifu.pics/sfw/smile')
imageResponse = response.json()["url"]
await ctx.reply("{0}".format(imageResponse))
@bot.command(name='bonk')
async def bonk(ctx, arg):
response = requests.get('https://api.waifu.pics/sfw/bonk')
imageResponse = response.json()["url"]
await ctx.reply("{0}".format(imageResponse))
await ctx.send("{0} Get BONK ".format(arg))
@bot.command(name='kiss')
async def kiss(ctx, arg):
response = requests.get('https://api.waifu.pics/sfw/kiss')
imageResponse = response.json()["url"]
await ctx.reply("{0}".format(imageResponse))
await ctx.send("{0} Deu um beijo em {1}".format(ctx.author ,arg))
@bot.command(name='kill')
async def kill(ctx):
response = requests.get('https://api.waifu.pics/sfw/kill')
imageResponse = response.json()["url"]
await ctx.reply("{0}".format(imageResponse))
#Yomama facts from yomomma-api
@bot.command(name='yomama')
async def yomama(ctx):
response = requests.get('https://yomomma-api.herokuapp.com/jokes?count=4')
factsResponse = response.json()
embed = Embed(
title="-😎YoMama😎-"
)
embed.add_field(name="Fact 1",value="**{0}**\n".format(factsResponse[0]["joke"]), inline=True)
embed.add_field(name="Fact 2",value="**{0}**\n".format(factsResponse[1]["joke"]), inline=True)
embed.add_field(name="Fact 3",value="**{0}**\n".format(factsResponse[2]["joke"]), inline=True)
await ctx.reply(embed=embed, mention_author=False)
@bot.command(name='changemymind')
async def changemymind(ctx, *, arg1=None):
    """Caption the 'Change My Mind' imgflip template (id 129242436) with *arg1*.

    Sends the generated image URL, or a usage embed when no text is given.
    Fixes: arg1 was a required keyword-only argument, so the help branch was
    unreachable; ctx.send() was called with a nonexistent 'replyembed' kwarg.
    """
    username = os.environ['IMFLIPUSER']
    # NOTE(review): the env-var key was redacted upstream ('<PASSWORD>');
    # confirm the real variable name before deploying.
    password = os.environ['IMFLIPPASSWORD']
    if arg1 is not None:
        URL = 'https://api.imgflip.com/caption_image'
        params = {
            'username': username,
            'password': password,
            'template_id': '129242436',
            'text0': arg1,
        }
        # imgflip wraps the generated image under data.url
        response = requests.request('POST', URL, params=params).json()
        await ctx.send("{0}".format(response["data"]["url"]))
    else:
        replyembed = Embed(title="-🤔 Wrong Sintax 🤔-")
        replyembed.add_field(name="Correct command sintax", value="command !changemymind <text>", inline=True)
        await ctx.send(embed=replyembed)  # fix: was ctx.send(replyembed=...)
@bot.command(name='smartthink')
async def smartthink(ctx, *, arg1=None):
    """Caption the 'smart thinking' imgflip template (id 89370399) with *arg1*.

    Fixes: restored the redacted os.environ password lookup (was a syntax
    error); arg1 defaults to None so the help branch is reachable; the help
    text named !changemymind; ctx.send() has no 'replyembed' kwarg.
    """
    username = os.environ['IMFLIPUSER']
    # NOTE(review): env-var key was redacted upstream; confirm the real name.
    password = os.environ['IMFLIPPASSWORD']
    if arg1 is not None:
        URL = 'https://api.imgflip.com/caption_image'
        params = {
            'username': username,
            'password': password,
            'template_id': '89370399',
            'text0': arg1,
        }
        response = requests.request('POST', URL, params=params).json()
        await ctx.send("{0}".format(response["data"]["url"]))
    else:
        replyembed = Embed(title="-🤔 Wrong Sintax 🤔-")
        replyembed.add_field(name="Correct command sintax", value="command !smartthink <text>", inline=True)
        await ctx.send(embed=replyembed)
@bot.command(name='explaing')
async def explaing(ctx, *, arg1=None):
    """Caption imgflip template 101470 with *arg1* and send the image URL.

    Fixes: arg1 defaults to None so the usage embed is reachable; the help
    text named !changemymind; ctx.send() has no 'replyembed' kwarg.
    """
    username = os.environ['IMFLIPUSER']
    # NOTE(review): env-var key was redacted upstream; confirm the real name.
    password = os.environ['IMFLIPPASSWORD']
    if arg1 is not None:
        URL = 'https://api.imgflip.com/caption_image'
        params = {
            'username': username,
            'password': password,
            'template_id': '101470',
            'text0': arg1,
        }
        response = requests.request('POST', URL, params=params).json()
        await ctx.send("{0}".format(response["data"]["url"]))
    else:
        replyembed = Embed(title="-🤔 Wrong Sintax 🤔-")
        replyembed.add_field(name="Correct command sintax", value="command !explaing <text>", inline=True)
        await ctx.send(embed=replyembed)
@bot.command(name='holdingboard')
async def holdingboard(ctx, *, arg1=None):
    """Caption the 'holding sign' imgflip template (id 216951317) with *arg1*.

    Fixes: restored the redacted os.environ password lookup (was a syntax
    error); arg1 defaults to None so the help branch is reachable; the help
    text named !changemymind; ctx.send() has no 'replyembed' kwarg.
    """
    username = os.environ['IMFLIPUSER']
    # NOTE(review): env-var key was redacted upstream; confirm the real name.
    password = os.environ['IMFLIPPASSWORD']
    if arg1 is not None:
        URL = 'https://api.imgflip.com/caption_image'
        params = {
            'username': username,
            'password': password,
            'template_id': '216951317',
            'text0': arg1,
        }
        response = requests.request('POST', URL, params=params).json()
        await ctx.send("{0}".format(response["data"]["url"]))
    else:
        replyembed = Embed(title="-🤔 Wrong Sintax 🤔-")
        replyembed.add_field(name="Correct command sintax", value="command !holdingboard <text>", inline=True)
        await ctx.send(embed=replyembed)
@bot.command(name='notthesame')
async def notthesame(ctx, *, arg1=None):
    """Caption imgflip template 342785297 with *arg1* as the top text.

    The bottom text is fixed ("Não! Nós não somos iguais!"). Fixes: restored
    the redacted os.environ password lookup (was a syntax error); arg1
    defaults to None so the help branch is reachable; the help text named
    !changemymind; ctx.send() has no 'replyembed' kwarg.
    """
    username = os.environ['IMFLIPUSER']
    # NOTE(review): env-var key was redacted upstream; confirm the real name.
    password = os.environ['IMFLIPPASSWORD']
    if arg1 is not None:
        URL = 'https://api.imgflip.com/caption_image'
        params = {
            'username': username,
            'password': password,
            'template_id': '342785297',
            'text0': arg1,
            'text1': "Não! Nós não somos iguais!"
        }
        response = requests.request('POST', URL, params=params).json()
        await ctx.send("{0}".format(response["data"]["url"]))
    else:
        replyembed = Embed(title="-🤔 Wrong Sintax 🤔-")
        replyembed.add_field(name="Correct command sintax", value="command !notthesame <text>", inline=True)
        await ctx.send(embed=replyembed)
def setup(bot):
bot.add_cog(Fun(bot)) | en | 0.778963 | #This is an example cog to show how commands can be added Fun commands #Pics neko from api.waifu.pics #Pics waifu from api.waifu.pics #Yomama facts from yomomma-api | 3.024496 | 3 |
test/salt_master_files/tests/cust_fun_6.py | dmulyalin/salt-nornir | 12 | 6620028 | def run(result):
"""Function to test None return"""
return None
 | def run(result):
    """Function to test None return"""
    # Intentionally returns None so callers can exercise None-result handling.
    return None
| en | 0.714607 | Function to test None return | 1.566833 | 2 |
S3ModelTools/tools/api/resources.py | twcook/S3ModelTools | 0 | 6620029 | """
tastypie based tools API v1
Resource definitions
"""
from tastypie.resources import ModelResource
from tastypie.authentication import Authentication
from tools.models import (Project, XdBoolean, XdLink, XdString, Units, XdFile, XdInterval, ReferenceRange,
SimpleReferenceRange, XdOrdinal, XdCount, XdQuantity, XdRatio,
XdTemporal, Party, Audit, Attestation, Participation, Cluster, DM, Modeler, NS, Predicate, PredObj)
# define the resources based on the models.
class ProjectResource(ModelResource):
    """REST resource exposing Project model instances."""
    class Meta:
        queryset = Project.objects.all()
        resource_name = 'project'
        always_return_data = True
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class ModelerResource(ModelResource):
    """REST resource exposing Modeler model instances."""
    class Meta:
        queryset = Modeler.objects.all()
        resource_name = 'modeler'
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class NSResource(ModelResource):
    """REST resource exposing NS (namespace) model instances."""
    class Meta:
        queryset = NS.objects.all()
        resource_name = 'ns'
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class PredicateResource(ModelResource):
    """REST resource exposing Predicate model instances."""
    class Meta:
        queryset = Predicate.objects.all()
        resource_name = 'predicate'
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class PredObjResource(ModelResource):
    """REST resource exposing PredObj model instances."""
    class Meta:
        queryset = PredObj.objects.all()
        resource_name = 'predobj'
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
# XdAny subclasses
class XdBooleanResource(ModelResource):
    """REST resource exposing XdBoolean model instances."""
    class Meta:
        queryset = XdBoolean.objects.all()
        resource_name = 'xdboolean'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class XdLinkResource(ModelResource):
    """REST resource exposing XdLink model instances."""
    class Meta:
        queryset = XdLink.objects.all()
        resource_name = 'xdlink'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class XdStringResource(ModelResource):
    """REST resource exposing XdString model instances."""
    class Meta:
        queryset = XdString.objects.all()
        resource_name = 'xdstring'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class UnitsResource(ModelResource):
    """REST resource exposing Units model instances."""
    class Meta:
        queryset = Units.objects.all()
        resource_name = 'units'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class XdFileResource(ModelResource):
    """REST resource exposing XdFile model instances."""
    class Meta:
        queryset = XdFile.objects.all()
        resource_name = 'xdfile'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class XdIntervalResource(ModelResource):
    """REST resource exposing XdInterval model instances."""
    class Meta:
        queryset = XdInterval.objects.all()
        resource_name = 'xdinterval'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class ReferenceRangeResource(ModelResource):
    """REST resource exposing ReferenceRange model instances."""
    class Meta:
        queryset = ReferenceRange.objects.all()
        resource_name = 'referencerange'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class SimpleReferenceRangeResource(ModelResource):
    """REST resource exposing SimpleReferenceRange model instances."""
    class Meta:
        queryset = SimpleReferenceRange.objects.all()
        resource_name = 'simplereferencerange'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class XdOrdinalResource(ModelResource):
    """REST resource exposing XdOrdinal model instances."""
    class Meta:
        queryset = XdOrdinal.objects.all()
        resource_name = 'xdordinal'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class XdCountResource(ModelResource):
    """REST resource exposing XdCount model instances."""
    class Meta:
        queryset = XdCount.objects.all()
        resource_name = 'xdcount'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class XdQuantityResource(ModelResource):
    """REST resource exposing XdQuantity model instances."""
    class Meta:
        queryset = XdQuantity.objects.all()
        resource_name = 'xdquantity'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class XdRatioResource(ModelResource):
    """REST resource exposing XdRatio model instances."""
    class Meta:
        queryset = XdRatio.objects.all()
        resource_name = 'xdratio'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class XdTemporalResource(ModelResource):
    """REST resource exposing XdTemporal model instances."""
    class Meta:
        queryset = XdTemporal.objects.all()
        resource_name = 'xdtemporal'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class PartyResource(ModelResource):
    """REST resource exposing Party model instances."""
    class Meta:
        queryset = Party.objects.all()
        resource_name = 'party'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class AuditResource(ModelResource):
    """REST resource exposing Audit model instances."""
    class Meta:
        queryset = Audit.objects.all()
        resource_name = 'audit'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class AttestationResource(ModelResource):
    """REST resource exposing Attestation model instances."""
    class Meta:
        queryset = Attestation.objects.all()
        resource_name = 'attestation'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class ParticipationResource(ModelResource):
    """REST resource exposing Participation model instances."""
    class Meta:
        queryset = Participation.objects.all()
        resource_name = 'participation'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
class ClusterResource(ModelResource):
    """REST resource exposing Cluster model instances."""
    class Meta:
        queryset = Cluster.objects.all()
        resource_name = 'cluster'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
#class EntryResource(ModelResource):
#class Meta:
#queryset = Entry.objects.all()
#resource_name = 'entry'
#excludes = ['r_code', 'schema_code']
## TODO: Fix with correct authentication before deployment.
#authentication = Authentication()
class DMResource(ModelResource):
    """REST resource exposing DM (data model) instances."""
    class Meta:
        queryset = DM.objects.all()
        resource_name = 'dm'
        excludes = ['r_code', 'schema_code']
        # TODO: Fix with correct authentication before deployment.
        authentication = Authentication()
| """
tastypie based tools API v1
Resource definitions
"""
from tastypie.resources import ModelResource
from tastypie.authentication import Authentication
from tools.models import (Project, XdBoolean, XdLink, XdString, Units, XdFile, XdInterval, ReferenceRange,
SimpleReferenceRange, XdOrdinal, XdCount, XdQuantity, XdRatio,
XdTemporal, Party, Audit, Attestation, Participation, Cluster, DM, Modeler, NS, Predicate, PredObj)
# define the resources based on the models.
class ProjectResource(ModelResource):
class Meta:
queryset = Project.objects.all()
resource_name = 'project'
always_return_data = True
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class ModelerResource(ModelResource):
class Meta:
queryset = Modeler.objects.all()
resource_name = 'modeler'
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class NSResource(ModelResource):
class Meta:
queryset = NS.objects.all()
resource_name = 'ns'
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class PredicateResource(ModelResource):
class Meta:
queryset = Predicate.objects.all()
resource_name = 'predicate'
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class PredObjResource(ModelResource):
class Meta:
queryset = PredObj.objects.all()
resource_name = 'predobj'
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
# XdAny subclasses
class XdBooleanResource(ModelResource):
class Meta:
queryset = XdBoolean.objects.all()
resource_name = 'xdboolean'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class XdLinkResource(ModelResource):
class Meta:
queryset = XdLink.objects.all()
resource_name = 'xdlink'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class XdStringResource(ModelResource):
class Meta:
queryset = XdString.objects.all()
resource_name = 'xdstring'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class UnitsResource(ModelResource):
class Meta:
queryset = Units.objects.all()
resource_name = 'units'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class XdFileResource(ModelResource):
class Meta:
queryset = XdFile.objects.all()
resource_name = 'xdfile'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class XdIntervalResource(ModelResource):
class Meta:
queryset = XdInterval.objects.all()
resource_name = 'xdinterval'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class ReferenceRangeResource(ModelResource):
class Meta:
queryset = ReferenceRange.objects.all()
resource_name = 'referencerange'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class SimpleReferenceRangeResource(ModelResource):
class Meta:
queryset = SimpleReferenceRange.objects.all()
resource_name = 'simplereferencerange'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class XdOrdinalResource(ModelResource):
class Meta:
queryset = XdOrdinal.objects.all()
resource_name = 'xdordinal'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class XdCountResource(ModelResource):
class Meta:
queryset = XdCount.objects.all()
resource_name = 'xdcount'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class XdQuantityResource(ModelResource):
class Meta:
queryset = XdQuantity.objects.all()
resource_name = 'xdquantity'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class XdRatioResource(ModelResource):
class Meta:
queryset = XdRatio.objects.all()
resource_name = 'xdratio'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class XdTemporalResource(ModelResource):
class Meta:
queryset = XdTemporal.objects.all()
resource_name = 'xdtemporal'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class PartyResource(ModelResource):
class Meta:
queryset = Party.objects.all()
resource_name = 'party'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class AuditResource(ModelResource):
class Meta:
queryset = Audit.objects.all()
resource_name = 'audit'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class AttestationResource(ModelResource):
class Meta:
queryset = Attestation.objects.all()
resource_name = 'attestation'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class ParticipationResource(ModelResource):
class Meta:
queryset = Participation.objects.all()
resource_name = 'participation'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
class ClusterResource(ModelResource):
class Meta:
queryset = Cluster.objects.all()
resource_name = 'cluster'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
#class EntryResource(ModelResource):
#class Meta:
#queryset = Entry.objects.all()
#resource_name = 'entry'
#excludes = ['r_code', 'schema_code']
## TODO: Fix with correct authentication before deployment.
#authentication = Authentication()
class DMResource(ModelResource):
class Meta:
queryset = DM.objects.all()
resource_name = 'dm'
excludes = ['r_code', 'schema_code']
# TODO: Fix with correct authentication before deployment.
authentication = Authentication()
| en | 0.7567 | tastypie based tools API v1
Resource definitions # define the resources based on the models. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # XdAny subclasses # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. # TODO: Fix with correct authentication before deployment. #class EntryResource(ModelResource): #class Meta: #queryset = Entry.objects.all() #resource_name = 'entry' #excludes = ['r_code', 'schema_code'] ## TODO: Fix with correct authentication before deployment. #authentication = Authentication() # TODO: Fix with correct authentication before deployment. | 2.165133 | 2 |
src/ai/cv/draw_box_cv.py | hbulpf/pydemo | 6 | 6620030 | <reponame>hbulpf/pydemo
#!/usr/bin/env python
# _*_coding:utf-8_*_
"""
@Time : 2021/7/4 12:16
@Author: RunAtWorld
@File: draw_box_cv.py
@Project: PyCharm
"""
import cv2
def draw(image_path='taxi.jpg', out_path='res/taxi-cv.jpg'):
    """Draw a labelled detection box on *image_path* and save it to *out_path*.

    Demonstrates cv2.rectangle / cv2.putText with a hard-coded 'taxi'
    detection. Paths are now parameters (defaults preserve the old behavior),
    and the label anchor is derived from the box corner instead of repeating
    the magic numbers.
    """
    img = cv2.imread(image_path)
    # top-left (x, y) and bottom-right (x, y) of the detection box
    pt1 = (341, 718)
    pt2 = (341 + 342, 718 + 201)
    cv2.rectangle(img, pt1, pt2, (0, 255, 0), 2)
    # class name and confidence score shown in the label
    object_type = 'taxi'
    credence = 0.596
    font = cv2.FONT_HERSHEY_SIMPLEX
    # label: text, anchor (relative to the box's top-left corner), font,
    # scale, color (BGR), thickness
    cv2.putText(img, '{} {:.3f}'.format(object_type, credence),
                (pt1[0] + 50, pt1[1] - 15), font, 1, (0, 0, 255), 2)
    cv2.imwrite(out_path, img)
if __name__ == '__main__':
    # Run the demo when executed as a script.
    draw()
| #!/usr/bin/env python
# _*_coding:utf-8_*_
"""
@Time : 2021/7/4 12:16
@Author: RunAtWorld
@File: draw_box_cv.py
@Project: PyCharm
"""
import cv2
def draw():
image_path = 'taxi.jpg'
img = cv2.imread(image_path)
# 左上(X,Y)
pt1 = (341, 718)
# 右下(X,Y)
pt2 = (341 + 342, 718 + 201)
cv2.rectangle(img, pt1, pt2, (0, 255, 0), 2)
# 类别名称
object_type = 'taxi'
# 置信度
credence = 0.596
# 定义字体
font = cv2.FONT_HERSHEY_SIMPLEX
# 打标签: 文字内容, 左上角坐标,字体,大小,颜色,字体厚度
cv2.putText(img, '{} {:.3f}'.format(object_type, credence), (341 + 50, 718 - 15), font, 1, (0, 0, 255), 2)
cv2.imwrite('res/taxi-cv.jpg', img)
if __name__ == '__main__':
draw() | zh | 0.456274 | #!/usr/bin/env python # _*_coding:utf-8_*_ @Time : 2021/7/4 12:16 @Author: RunAtWorld @File: draw_box_cv.py @Project: PyCharm # 左上(X,Y) # 右下(X,Y) # 类别名称 # 置信度 # 定义字体 # 打标签: 文字内容, 左上角坐标,字体,大小,颜色,字体厚度 | 3.111666 | 3 |
setup.py | huanglianghua/cortex | 8 | 6620031 | from setuptools import setup, find_packages
if __name__ == '__main__':
    # Package metadata, collected once and unpacked into setup() below.
    metadata = dict(
        name='cortex',
        version='0.0.2',
        description='A minimal engine and a large benchmark for deep learning algorithms',
        author='<NAME>',
        author_email='<EMAIL>',
        keywords=['cortex', 'deep learning'],
        url='https://github.com/huanglianghua/cortex',
        packages=find_packages(exclude=('tests', 'tools')),
        include_package_data=True,
        license='MIT License',
    )
    setup(**metadata)
| from setuptools import setup, find_packages
if __name__ == '__main__':
setup(
name='cortex',
version='0.0.2',
description='A minimal engine and a large benchmark for deep learning algorithms',
author='<NAME>',
author_email='<EMAIL>',
keywords=['cortex', 'deep learning'],
url='https://github.com/huanglianghua/cortex',
packages=find_packages(exclude=('tests', 'tools')),
include_package_data=True,
license='MIT License')
| none | 1 | 1.071174 | 1 | |
packages/proteus.models.maskrcnn/proteus/models/maskrcnn/client.py | PieterBlomme/proteus | 8 | 6620032 | <reponame>PieterBlomme/proteus<gh_stars>1-10
import logging
from pathlib import Path
import cv2
import numpy as np
from PIL import Image
from proteus.models.base import BaseModel
from proteus.models.base.modelconfigs import BaseModelConfig
from proteus.types import BoundingBox, Segmentation
from tritonclient.utils import triton_to_np_dtype
from .helpers import detection_postprocess, image_preprocess, read_class_names
logger = logging.getLogger(__name__)
folder_path = Path(__file__).parent
class ModelConfig(BaseModelConfig):
    """Configuration for MaskRCNN; no options beyond the shared base fields."""
    pass
class MaskRCNN(BaseModel):
    """ONNX Mask R-CNN instance-segmentation model served through Triton."""

    DESCRIPTION = (
        "This model is a real-time neural network for object "
        "instance segmentation that detects 80 different classes."
        "mAP of 0.36"
        "Taken from https://github.com/onnx/models."
    )
    CLASSES = read_class_names(f"{folder_path}/coco_names.txt")
    NUM_OUTPUTS = 4
    MAX_BATCH_SIZE = 0
    MODEL_URL = "https://github.com/onnx/models/raw/master/vision/object_detection_segmentation/mask-rcnn/model/MaskRCNN-10.onnx"
    CONFIG_PATH = f"{folder_path}/config.template"
    INPUT_NAME = "image"
    OUTPUT_NAMES = ["6568", "6570", "6572", "6887"]
    DTYPE = "FP32"
    MODEL_CONFIG = ModelConfig

    @classmethod
    def preprocess(cls, img):
        """
        Pre-process an image to meet the size, type and format
        requirements specified by the parameters.

        :param img: Pillow image
        :returns:
            - model_input: input as required by the model
            - extra_data: dict of data that is needed by the postprocess function
        """
        extra_data = {}

        # Careful, Pillow has (w,h) format but most models expect (h,w)
        w, h = img.size
        extra_data["original_image_size"] = (h, w)

        img = img.convert("RGB")
        logger.info(f"Original image size: {img.size}")
        img = image_preprocess(img)
        npdtype = triton_to_np_dtype(cls.DTYPE)
        img = img.astype(npdtype)
        return img, extra_data

    @classmethod
    def postprocess(cls, results, extra_data, batch_size, batching):
        """
        Post-process results to show bounding boxes.
        https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/retinanet

        :param results: model outputs
        :param extra_data: dict of data that is needed by the postprocess function
        :param batch_size
        :param batching: boolean flag indicating if batching
        :returns: json result
        """
        original_image_size = extra_data["original_image_size"]

        # get outputs
        boxes = results.as_numpy(cls.OUTPUT_NAMES[0])
        labels = results.as_numpy(cls.OUTPUT_NAMES[1])
        scores = results.as_numpy(cls.OUTPUT_NAMES[2])
        masks = results.as_numpy(cls.OUTPUT_NAMES[3])

        postprocess_results = detection_postprocess(
            original_image_size, boxes, labels, scores, masks
        )

        results = []
        # TODO add another loop if batching
        for (score, box, cat, mask) in postprocess_results:
            x1, y1, x2, y2 = box
            bbox = BoundingBox(
                x1=int(x1),
                y1=int(y1),
                x2=int(x2),
                y2=int(y2),
                class_name=cls.CLASSES[int(cat)],
                score=float(score),
            )
            ret, thresh = cv2.threshold(mask, 0.5, 1, cv2.THRESH_BINARY)
            contours, hierarchy = cv2.findContours(
                thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
            )
            # fix: an empty mask yields no contours, which used to raise
            # IndexError on contours[0]; fall through to the dummy polygon.
            polygon = contours[0].reshape(-1).tolist() if contours else []
            if len(polygon) <= 4:
                # not valid, create a dummy
                polygon = [0, 0, 1, 0, 1, 1]
            segmentation = Segmentation(
                segmentation=polygon,
                class_name=cls.CLASSES[int(cat)],
                score=float(score),
            )
            results.append({"bounding_box": bbox, "segmentation": segmentation})
        return results
| import logging
from pathlib import Path
import cv2
import numpy as np
from PIL import Image
from proteus.models.base import BaseModel
from proteus.models.base.modelconfigs import BaseModelConfig
from proteus.types import BoundingBox, Segmentation
from tritonclient.utils import triton_to_np_dtype
from .helpers import detection_postprocess, image_preprocess, read_class_names
logger = logging.getLogger(__name__)
folder_path = Path(__file__).parent
class ModelConfig(BaseModelConfig):
pass
class MaskRCNN(BaseModel):
DESCRIPTION = (
"This model is a real-time neural network for object "
"instance segmentation that detects 80 different classes."
"mAP of 0.36"
"Taken from https://github.com/onnx/models."
)
CLASSES = read_class_names(f"{folder_path}/coco_names.txt")
NUM_OUTPUTS = 4
MAX_BATCH_SIZE = 0
MODEL_URL = "https://github.com/onnx/models/raw/master/vision/object_detection_segmentation/mask-rcnn/model/MaskRCNN-10.onnx"
CONFIG_PATH = f"{folder_path}/config.template"
INPUT_NAME = "image"
OUTPUT_NAMES = ["6568", "6570", "6572", "6887"]
DTYPE = "FP32"
MODEL_CONFIG = ModelConfig
@classmethod
def preprocess(cls, img):
"""
Pre-process an image to meet the size, type and format
requirements specified by the parameters.
:param img: Pillow image
:returns:
- model_input: input as required by the model
- extra_data: dict of data that is needed by the postprocess function
"""
extra_data = {}
# Careful, Pillow has (w,h) format but most models expect (h,w)
w, h = img.size
extra_data["original_image_size"] = (h, w)
img = img.convert("RGB")
logger.info(f"Original image size: {img.size}")
img = image_preprocess(img)
npdtype = triton_to_np_dtype(cls.DTYPE)
img = img.astype(npdtype)
return img, extra_data
@classmethod
def postprocess(cls, results, extra_data, batch_size, batching):
"""
Post-process results to show bounding boxes.
https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/retinanet
:param results: model outputs
:param extra_data: dict of data that is needed by the postprocess function
:param batch_size
:param batching: boolean flag indicating if batching
:returns: json result
"""
original_image_size = extra_data["original_image_size"]
# get outputs
boxes = results.as_numpy(cls.OUTPUT_NAMES[0])
labels = results.as_numpy(cls.OUTPUT_NAMES[1])
scores = results.as_numpy(cls.OUTPUT_NAMES[2])
masks = results.as_numpy(cls.OUTPUT_NAMES[3])
postprocess_results = detection_postprocess(
original_image_size, boxes, labels, scores, masks
)
results = []
# TODO add another loop if batching
for (score, box, cat, mask) in postprocess_results:
x1, y1, x2, y2 = box
bbox = BoundingBox(
x1=int(x1),
y1=int(y1),
x2=int(x2),
y2=int(y2),
class_name=cls.CLASSES[int(cat)],
score=float(score),
)
ret, thresh = cv2.threshold(mask, 0.5, 1, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
polygon = contours[0].reshape(-1).tolist()
if len(polygon) <= 4:
# not valid, create a dummy
polygon = [0, 0, 1, 0, 1, 1]
segmentation = Segmentation(
segmentation=polygon,
class_name=cls.CLASSES[int(cat)],
score=float(score),
)
results.append({"bounding_box": bbox, "segmentation": segmentation})
return results | en | 0.773518 | Pre-process an image to meet the size, type and format requirements specified by the parameters. :param img: Pillow image :returns: - model_input: input as required by the model - extra_data: dict of data that is needed by the postprocess function # Careful, Pillow has (w,h) format but most models expect (h,w) Post-process results to show bounding boxes. https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/retinanet :param results: model outputs :param extra_data: dict of data that is needed by the postprocess function :param batch_size :param batching: boolean flag indicating if batching :returns: json result # get outputs # TODO add another loop if batching # not valid, create a dummy | 2.294373 | 2 |
construct/compat.py | construct-org/construct | 8 | 6620033 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import
try:
    # Python 2: keep the builtin ``basestring`` for isinstance() checks.
    basestring = basestring
except NameError:
    # Python 3: ``basestring`` no longer exists; match text and binary strings.
    basestring = (str, bytes)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
try:
basestring = basestring
except NameError:
basestring = (str, bytes) | en | 0.769321 | # -*- coding: utf-8 -*- | 1.594749 | 2 |
sinnn/utils.py | sohaibimran7/sinnn | 1 | 6620034 | <filename>sinnn/utils.py
import numpy as np
import pickle
def sigmoid(x):
    """
    Applies the sigmoid transformation on a given 1d vector

    Parameters
    ----------
    x : ndarray
        1d Array to be sigmoided

    Returns
    -------
    ndarray
        1d Array of same shape as x containing sigmoided values of x

    Notes
    -----
    σ(x) = 1/(1 + e^-x) = (e^x)/(1 + e^x)
    where:
        σ = sigmoid function
        x = input
        e = Euler's number

    ``np.where`` evaluates both branches, so the previous implementation
    triggered overflow RuntimeWarnings in ``np.exp`` for large ``|x|``.
    Using ``z = e^(-|x|)`` keeps every exponent non-positive, computing the
    same two algebraic forms without ever overflowing.
    """
    z = np.exp(-np.abs(x))
    return np.where(x >= 0, 1 / (1 + z), z / (1 + z))
def softmax(x):
    """
    Applies the softmax transformation on a given 2d vector

    The row-wise maximum is subtracted before exponentiating; this keeps the
    computation numerically stable and leaves the result unchanged.

    Parameters
    ----------
    x : ndarray
        2d array to be softmaxed

    Returns
    -------
    ndarray
        2d Array of same shape as x containing softmaxed values of x

    Notes
    -----
    σ(x) = (e^x_i)/ Σ (e^x)
    """
    shifted = x - x.max(axis=1, keepdims=True)
    exponentials = np.exp(shifted)
    row_totals = exponentials.sum(axis=1, keepdims=True)
    return exponentials / row_totals
def one_hot(labels, categories):
    """
    One hot encodes the labels.

    Each row of the identity matrix is a one-hot vector, so indexing
    ``np.eye(categories)`` with the labels yields the encoding directly.

    Parameters
    ----------
    labels : ndarray
        1d array of ints containing labels.
    categories : int
        the number of categories to encode the labels onto

    Returns
    -------
    ndarray
        2d Array of shape (len(labels), categories) containing one hot encoded labels
    """
    identity = np.eye(categories)
    return identity[labels]
def batches(x, y, batch_size):
    """
    Yields successive batch_sized chunks of inputs and corresponding outputs from list.

    The final chunk may be shorter than batch_size when len(x) is not a
    multiple of batch_size.

    Parameters
    ----------
    x : list
        list of inputs
    y : list
        list of outputs
    batch_size :
        size of the batches to be yielded.

    Returns
    -------
    list
        batch of inputs of length (batch_size)
    list
        batch of outputs of length (batch_size)
    """
    start = 0
    while start < len(x):
        stop = start + batch_size
        yield x[start:stop], y[start:stop]
        start = stop
def save_model(model, filename='model'):
    """
    Saves an object to file

    Parameters
    ----------
    model : object
        any object to be stored on file
    filename : str
        name of the file the object needs to be stored on. Default: model
    """
    # protocol=pickle.HIGHEST_PROTOCOL is the documented equivalent of -1.
    with open(filename, 'wb') as handle:
        pickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_model(filename='model'):
    """
    Loads an object from file.

    Parameters
    ----------
    filename: str
        name of the file the object needs to be loaded from. Default: model

    Returns
    -------
    object that was stored on the file
    """
    with open(filename, 'rb') as handle:
        restored = pickle.load(handle)
    return restored
| <filename>sinnn/utils.py
import numpy as np
import pickle
def sigmoid(x):
"""
Applies the sigmoid transformation on a given 1d vector
Parameters
----------
x : ndarray
1d Array to be sigmoided
Returns
-------
ndarray
1d Array of same shape as x containing sigmoided values of x
Notes
-----
σ(x) = 1/(1 + e^-x) = (e^x)/(1 + e^x)
where:
σ = sigmoid function
x = input
e = Euler's number
"""
return np.where(x >= 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x)))
def softmax(x):
"""
Applies the softmax transformation on a given 2d vector
In order to make softmax more stable, x.max is subtracted from each value of x before performing softmax.
Parameters
----------
x : ndarray
2d array to be softmaxed
Returns
-------
ndarray
2d Array of same shape as x containing softmaxed values of x
Notes
-----
σ(x) = (e^x_i)/ Σ (e^x)
where:
σ = softmax function
x_i = ith value of x
x = input
e = Euler's number
"""
x = x - x.max(axis=1, keepdims=True)
y = np.exp(x)
return y / y.sum(axis=1, keepdims=True)
def one_hot(labels, categories):
    """One-hot encode integer labels.

    Parameters
    ----------
    labels : ndarray
        1d array of non-negative integer class labels.
    categories : int
        Total number of classes (width of the encoding).

    Returns
    -------
    ndarray
        Array of shape ``(len(labels), categories)`` with exactly one 1.0
        per row, at the column given by the label.
    """
    # Row i of the identity matrix is exactly the one-hot vector for class i.
    return np.eye(categories)[labels]
def batches(x, y, batch_size):
    """Iterate over (inputs, outputs) chunks of at most ``batch_size`` items.

    Parameters
    ----------
    x : list
        Input samples.
    y : list
        Target values aligned with ``x``.
    batch_size : int
        Maximum number of samples per yielded chunk.

    Yields
    ------
    tuple(list, list)
        A slice of ``x`` and the matching slice of ``y``; the final pair
        may be shorter than ``batch_size``.
    """
    start = 0
    total = len(x)
    while start < total:
        stop = start + batch_size
        yield x[start:stop], y[start:stop]
        start = stop
def save_model(model, filename='model'):
    """Pickle ``model`` to ``filename``.

    Parameters
    ----------
    model : object
        Any picklable object to be stored on disk.
    filename : str
        Destination path. Default: ``'model'``.
    """
    # protocol=-1 in the original selects the highest protocol; this is the
    # same choice spelled explicitly.
    with open(filename, 'wb') as handle:
        pickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_model(filename='model'):
    """Unpickle and return the object stored at ``filename``.

    Parameters
    ----------
    filename : str
        Path of the file to load. Default: ``'model'``.

    Returns
    -------
    object
        The object that was stored in the file.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
| en | 0.664754 | Applies the sigmoid transformation on a given 1d vector Parameters ---------- x : ndarray 1d Array to be sigmoided Returns ------- ndarray 1d Array of same shape as x containing sigmoided values of x Notes ----- σ(x) = 1/(1 + e^-x) = (e^x)/(1 + e^x) where: σ = sigmoid function x = input e = Euler's number Applies the softmax transformation on a given 2d vector In order to make softmax more stable, x.max is subtracted from each value of x before performing softmax. Parameters ---------- x : ndarray 2d array to be softmaxed Returns ------- ndarray 2d Array of same shape as x containing softmaxed values of x Notes ----- σ(x) = (e^x_i)/ Σ (e^x) where: σ = softmax function x_i = ith value of x x = input e = Euler's number One hot encodes the labels. Parameters ---------- labels : ndarray 1d array of ints containing labels. categories : int the number of categories to encode the labels onto Returns ------- ndarray 2d Array of shape (len(x), categories) containing one hot encoded labels Yields successive batch_sized chunks of inputs and corresponding outputs from list. Parameters ---------- x : list list of inputs y : list list of outputs batch_size : size of the batches to be yielded. Returns ------- list batch of inputs of length (batch_size) list batch of outputs of length (batch_size) Saves an object to file Parameters ---------- model : object any object to be stored on file filename : str name of the file the object needs to be stored on. Default: model Loads an object from file. Parameters ---------- filename: str name of the file the object needs to be loaded from. Default: model Returns ------- object that was stored on the file | 3.81595 | 4 |
src/pyexpt/expts.py | benweishi/PyExpt | 0 | 6620035 | import pandas as pd
import numpy as np
from itertools import product
from sklearn.model_selection import ParameterGrid
from sklearn.base import clone, BaseEstimator
import matplotlib.pyplot as plt
import seaborn as sns
import time
class Expt():
"""A class to run experiments.
Product run all algorithm in `alg_list` with all dataset in `data_list`,
try all params in `alg_params` and `data_params`, repeat `n_repeat` times.
Parameters
----------
run_func : function(algorithm, dataset), optional
A function to run the algorithm with the dataset. The default is None.
alg_list : list of sklearn.base.BaseEstimator, optional
A list of algorithms to run. The default is [].
data_list : list of sklearn.base.BaseEstimator, optional
A list of datasets to run. The default is [].
measure_func : function(algorithm, dataset), optional
A function to measure the performance of the algorithm with the dataset. The default is None.
The function should return a dictionary of results.
alg_params : dict, optional
A dictionary of parameters for the algorithms. The default is {}.
data_params : dict, optional
A dictionary of parameters for the datasets. The default is {}.
n_repeat : int, optional
Number of times to repeat the experiment, also is used as a part of the random_state for the dataset and the algorithm. The default is 1.
random_state : int, optional
Random seed. The default is None, which means to randomly generate one for each run.
The actural random seed is random_state + round.
"""
def __init__(self, run_func=None, alg_list=[], data_list=[], measure_func=None, alg_params={}, data_params={}, n_repeat=1, random_state=None) -> None:
self.run_func = run_func
self.alg_list = alg_list
self.data_list = data_list
self.measure_func = measure_func
self.alg_params = alg_params
self.data_params = data_params
self.n_repeat = n_repeat
self.random_state = random_state
self.results = None
    def run(self, n_repeat=None, random_state=None):
        """Run the experiments and store the results in self.results as a `pandas.DataFrame`,
        1 row per experiment, with columns:
        ['round','alg','data','time'] + alg_params.keys() + data_params.keys() + measure_func.keys().

        Parameters
        ----------
        n_repeat : int, optional
            Number of times to repeat the experiment. The default is None, which means to use the value in self.n_repeat.
        random_state : int, optional
            Random seed. The default is None, which means to use the value in self.random_state.
        """
        # Fall back to the configuration stored on the instance.
        if n_repeat is None:
            n_repeat = self.n_repeat
        if random_state is None:
            # Draw a fresh base seed unless one was fixed at construction time.
            random_state = np.random.randint(2**30) if self.random_state is None else self.random_state
        self.results = pd.DataFrame(columns=['round'])
        for r in range(n_repeat):
            result = {'round':r}
            # Cartesian product: every dataset template x every data-parameter combination.
            for data, data_params in product(self.data_list, ParameterGrid(self.data_params)):
                if data is None:
                    # NOTE(review): when data is None no `dataset` is created, yet
                    # `dataset` is still passed to run_func/measure_func below —
                    # confirm callers never combine None datasets with those callbacks.
                    result |= {'data':'None'}
                else:
                    start_time = time.time()
                    # Clone so the template object is never mutated; the seed varies per round.
                    dataset = clone(data).set_params(**data_params, random_state=random_state+r)
                    result |= {'data':str(data), 'make_time':time.time()-start_time} | data_params
                for alg, alg_params in product(self.alg_list, ParameterGrid(self.alg_params)):
                    if alg is None:
                        result |= {'alg':'None'}
                    else:
                        start_time = time.time()
                        algorithm = clone(alg).set_params(**alg_params)
                        result |= {'alg':str(algorithm), 'init_time':time.time()-start_time} | alg_params
                    if self.run_func is not None:
                        start_time = time.time()
                        self.run_func(algorithm, dataset)
                        result |= {'run_time':time.time()-start_time}
                    if self.measure_func is not None:
                        start_time = time.time()
                        result |= self.measure_func(algorithm, dataset) | {'measure_time':time.time()-start_time}
                    # Appending one row at a time via concat is O(n^2) overall, but keeps
                    # partial results available in self.results if a later run raises.
                    self.results = pd.concat([self.results, pd.DataFrame([result])], ignore_index=True)
        return self
def plot(self, x_list, y_list, group):
"""Plot the results."""
n_row = len(y_list)
n_col = len(x_list)
fig, axs = plt.subplots(n_row, n_col, squeeze=False, sharex=True, figsize=(0.5+5.5*n_col,0.5+3.5*n_row))
for r in range(n_row):
for c in range(n_col):
sns.lineplot(data=self.results, x=x_list[c], y=y_list[r], hue="alg", ax=axs[r, c]) | import pandas as pd
import numpy as np
from itertools import product
from sklearn.model_selection import ParameterGrid
from sklearn.base import clone, BaseEstimator
import matplotlib.pyplot as plt
import seaborn as sns
import time
class Expt():
"""A class to run experiments.
Product run all algorithm in `alg_list` with all dataset in `data_list`,
try all params in `alg_params` and `data_params`, repeat `n_repeat` times.
Parameters
----------
run_func : function(algorithm, dataset), optional
A function to run the algorithm with the dataset. The default is None.
alg_list : list of sklearn.base.BaseEstimator, optional
A list of algorithms to run. The default is [].
data_list : list of sklearn.base.BaseEstimator, optional
A list of datasets to run. The default is [].
measure_func : function(algorithm, dataset), optional
A function to measure the performance of the algorithm with the dataset. The default is None.
The function should return a dictionary of results.
alg_params : dict, optional
A dictionary of parameters for the algorithms. The default is {}.
data_params : dict, optional
A dictionary of parameters for the datasets. The default is {}.
n_repeat : int, optional
Number of times to repeat the experiment, also is used as a part of the random_state for the dataset and the algorithm. The default is 1.
random_state : int, optional
Random seed. The default is None, which means to randomly generate one for each run.
The actural random seed is random_state + round.
"""
def __init__(self, run_func=None, alg_list=[], data_list=[], measure_func=None, alg_params={}, data_params={}, n_repeat=1, random_state=None) -> None:
self.run_func = run_func
self.alg_list = alg_list
self.data_list = data_list
self.measure_func = measure_func
self.alg_params = alg_params
self.data_params = data_params
self.n_repeat = n_repeat
self.random_state = random_state
self.results = None
    def run(self, n_repeat=None, random_state=None):
        """Run the experiments and store the results in self.results as a `pandas.DataFrame`,
        1 row per experiment, with columns:
        ['round','alg','data','time'] + alg_params.keys() + data_params.keys() + measure_func.keys().

        Parameters
        ----------
        n_repeat : int, optional
            Number of times to repeat the experiment. The default is None, which means to use the value in self.n_repeat.
        random_state : int, optional
            Random seed. The default is None, which means to use the value in self.random_state.
        """
        # Fall back to the configuration stored on the instance.
        if n_repeat is None:
            n_repeat = self.n_repeat
        if random_state is None:
            # Draw a fresh base seed unless one was fixed at construction time.
            random_state = np.random.randint(2**30) if self.random_state is None else self.random_state
        self.results = pd.DataFrame(columns=['round'])
        for r in range(n_repeat):
            result = {'round':r}
            # Cartesian product: every dataset template x every data-parameter combination.
            for data, data_params in product(self.data_list, ParameterGrid(self.data_params)):
                if data is None:
                    # NOTE(review): when data is None no `dataset` is created, yet
                    # `dataset` is still passed to run_func/measure_func below —
                    # confirm callers never combine None datasets with those callbacks.
                    result |= {'data':'None'}
                else:
                    start_time = time.time()
                    # Clone so the template object is never mutated; the seed varies per round.
                    dataset = clone(data).set_params(**data_params, random_state=random_state+r)
                    result |= {'data':str(data), 'make_time':time.time()-start_time} | data_params
                for alg, alg_params in product(self.alg_list, ParameterGrid(self.alg_params)):
                    if alg is None:
                        result |= {'alg':'None'}
                    else:
                        start_time = time.time()
                        algorithm = clone(alg).set_params(**alg_params)
                        result |= {'alg':str(algorithm), 'init_time':time.time()-start_time} | alg_params
                    if self.run_func is not None:
                        start_time = time.time()
                        self.run_func(algorithm, dataset)
                        result |= {'run_time':time.time()-start_time}
                    if self.measure_func is not None:
                        start_time = time.time()
                        result |= self.measure_func(algorithm, dataset) | {'measure_time':time.time()-start_time}
                    # Appending one row at a time via concat is O(n^2) overall, but keeps
                    # partial results available in self.results if a later run raises.
                    self.results = pd.concat([self.results, pd.DataFrame([result])], ignore_index=True)
        return self
def plot(self, x_list, y_list, group):
"""Plot the results."""
n_row = len(y_list)
n_col = len(x_list)
fig, axs = plt.subplots(n_row, n_col, squeeze=False, sharex=True, figsize=(0.5+5.5*n_col,0.5+3.5*n_row))
for r in range(n_row):
for c in range(n_col):
sns.lineplot(data=self.results, x=x_list[c], y=y_list[r], hue="alg", ax=axs[r, c]) | en | 0.639225 | A class to run experiments. Product run all algorithm in `alg_list` with all dataset in `data_list`, try all params in `alg_params` and `data_params`, repeat `n_repeat` times. Parameters ---------- run_func : function(algorithm, dataset), optional A function to run the algorithm with the dataset. The default is None. alg_list : list of sklearn.base.BaseEstimator, optional A list of algorithms to run. The default is []. data_list : list of sklearn.base.BaseEstimator, optional A list of datasets to run. The default is []. measure_func : function(algorithm, dataset), optional A function to measure the performance of the algorithm with the dataset. The default is None. The function should return a dictionary of results. alg_params : dict, optional A dictionary of parameters for the algorithms. The default is {}. data_params : dict, optional A dictionary of parameters for the datasets. The default is {}. n_repeat : int, optional Number of times to repeat the experiment, also is used as a part of the random_state for the dataset and the algorithm. The default is 1. random_state : int, optional Random seed. The default is None, which means to randomly generate one for each run. The actural random seed is random_state + round. Run the experiments and store the results in self.results as a `pandas.DataFrame`, 1 row per experiment, with columns: ['round','alg','data','time'] + alg_params.keys() + data_params.keys() + measure_func.keys(). Parameters ---------- n_repeat : int, optional Number of times to repeat the experiment. The default is None, which means to use the value in self.n_repeat. random_state : int, optional Random seed. The default is None, which means to use the value in self.random_state. Plot the results. | 3.102509 | 3 |
regiosqm/molecule_svg.py | jensengroup/RegioSQM20 | 2 | 6620036 |
from rdkit import Chem
from rdkit.Chem.Draw import rdMolDraw2D
# from rdkit.Chem import rdDepictor
# rdDepictor.SetPreferCoordGen(True)
from reorder_atoms import get_atoms_in_order
from collections import defaultdict
# Drawing Options
color_predicted = (0.2, 1, 0.0) # Green
color_loseicted = (1.0, 0.1, 0.3) # Red
color_measured = (0.0, 0.0, 0.0) # Black
arad = 0.4 #0.25
#molsPerRow = 4 #change this in generate_structure()
subImgSize = (300,300)
def draw2d(mol, name, subImgSize, highlight_predicted, highlight_loseicted, measure=None):
    """Render one molecule to an SVG string with per-atom highlights.

    Parameters
    ----------
    mol : RDKit Mol to draw.
    name : legend text placed under the molecule.
    subImgSize : (width, height) of the drawing in pixels.
    highlight_predicted : atom indices highlighted in green (confident predictions).
    highlight_loseicted : atom indices highlighted in red (below-threshold predictions).
    measure : optional atom indices highlighted in black (measured sites).

    Returns
    -------
    str : the SVG text produced by RDKit.
    """
    # The module-level colour/radius constants are only read here, so the
    # previous redundant ``global`` declarations were removed.
    d2d = rdMolDraw2D.MolDraw2DSVG(subImgSize[0], subImgSize[1])
    d2d.SetFontSize(1)  # atom label font size
    dos = d2d.drawOptions()
    dos.legendFontSize=23  # legend font size
    dos.atomHighlightsAreCircles = False
    dos.fillHighlights = True
    atomHighlighs = defaultdict(list)
    highlightRads = {}
    for idx in highlight_predicted:
        atomHighlighs[idx].append(color_predicted)
        highlightRads[idx] = arad
    # did threshold find some predictions?
    # Keep only red sites that are not already highlighted green.
    highlight_loseicted = list(set(highlight_loseicted)-set(highlight_predicted))
    if len(highlight_loseicted):
        for idx in highlight_loseicted:
            atomHighlighs[idx].append(color_loseicted)
            highlightRads[idx] = arad
    if measure:
        for idx in measure:
            atomHighlighs[idx].append(color_measured)
            highlightRads[idx] = arad
    d2d.DrawMoleculeWithHighlights(mol, name, dict(atomHighlighs), {}, highlightRads, {})
    d2d.FinishDrawing()
    return d2d.GetDrawingText()
def generate_structure(ref_smi, smiles, names, predicted, highlight_measure=None):
    """Assemble one SVG grid (up to 4 molecules per row) from a list of SMILES.

    ``predicted`` is a pair (highlight_predicted, highlight_loseicted) of
    per-molecule atom-index lists; ``highlight_measure`` marks the measured
    atoms on every molecule.  Returns the complete SVG document as a string.
    """
    global subImgSize
    molsPerRow = 4
    highlight_predicted, highlight_loseicted = predicted
    if names == None:
        names = ['' for i in range(len(smiles))]
    # Grid geometry: ceil(len(smiles)/molsPerRow) rows; one row shrinks to fit.
    nRows = len(smiles) // molsPerRow
    if len(smiles) % molsPerRow:
        nRows += 1
    if nRows == 1:
        molsPerRow = len(smiles)
    fullSize = (molsPerRow * subImgSize[0], nRows * subImgSize[1])
    header = """<svg version='1.1' baseProfile='full'
xmlns='http://www.w3.org/2000/svg'
xmlns:rdkit='http://www.rdkit.org/xml'
xmlns:xlink='http://www.w3.org/1999/xlink'
xml:space='preserve'
width='{0}px' height='{1}px' viewBox='0 0 {0} {1}'>
<!-- END OF HEADER -->""".format(fullSize[0],fullSize[1])
    # Each per-molecule SVG body is translated into its grid cell.
    spacer = '<g transform="translate({0},{1})">\n{2}</g>'
    ### Make sure the atoms are in order ###
    mols = [Chem.MolFromSmiles(smi) for smi in smiles]
    mols = get_atoms_in_order(Chem.MolFromSmiles(ref_smi), mols)
    cwidth = 0
    cheight = 0
    drawed_mols = []
    for i in range(len(smiles)):
        res = draw2d(mols[i], names[i], subImgSize, highlight_predicted[i], highlight_loseicted[i], highlight_measure)
        # Strip the per-molecule SVG header (everything up to the sentinel
        # comment) and the trailing close tags so only the drawing body remains.
        res = res.split("\n")
        end_of_header = res.index("<!-- END OF HEADER -->") + 1
        res = "\n".join(res[end_of_header:-2])
        res = "".join(spacer.format(int(cwidth*subImgSize[0]), int(cheight*subImgSize[1]), res))
        drawed_mols.append(res)
        # Advance the grid cursor: wrap to the next row after molsPerRow cells.
        if int(i+1) % molsPerRow == 0 and i != 0:
            cheight += 1
            cwidth = 0
        elif molsPerRow == 1:
            cheight += 1
            cwidth = 0
        else:
            cwidth += 1
    svg = header + "\n" + "\n".join(drawed_mols) + "\n</svg>"
    return svg
if __name__ == "__main__":
    # Smoke test: two tautomers of a methyl-thiophene pyrazole.
    ref_smi = 'c1c(c2cc(sc2)C)n[nH]c1'
    smiles = ['c1c(-c2cc(C)sc2)[nH]nc1', 'c1c(-c2cc(C)sc2)n[nH]c1']
    names = ['taut1', 'taut2']
    highlight_predicted = [[7,0], [10]]
    highlight_loseicted = [[7], [10]]
    highlight_measure = [0]
    result_svg = generate_structure(ref_smi, smiles, names, [highlight_predicted, highlight_loseicted], highlight_measure=highlight_measure)
    # Context manager guarantees the handle is closed even if write() raises.
    with open('test.svg', 'w') as fd:
        fd.write(result_svg)
|
from rdkit import Chem
from rdkit.Chem.Draw import rdMolDraw2D
# from rdkit.Chem import rdDepictor
# rdDepictor.SetPreferCoordGen(True)
from reorder_atoms import get_atoms_in_order
from collections import defaultdict
# Drawing Options
color_predicted = (0.2, 1, 0.0) # Green
color_loseicted = (1.0, 0.1, 0.3) # Red
color_measured = (0.0, 0.0, 0.0) # Black
arad = 0.4 #0.25
#molsPerRow = 4 #change this in generate_structure()
subImgSize = (300,300)
def draw2d(mol, name, subImgSize, highlight_predicted, highlight_loseicted, measure=None):
    """Render one molecule to an SVG string with per-atom highlights.

    Parameters
    ----------
    mol : RDKit Mol to draw.
    name : legend text placed under the molecule.
    subImgSize : (width, height) of the drawing in pixels.
    highlight_predicted : atom indices highlighted in green (confident predictions).
    highlight_loseicted : atom indices highlighted in red (below-threshold predictions).
    measure : optional atom indices highlighted in black (measured sites).

    Returns
    -------
    str : the SVG text produced by RDKit.
    """
    # The module-level colour/radius constants are only read here, so the
    # previous redundant ``global`` declarations were removed.
    d2d = rdMolDraw2D.MolDraw2DSVG(subImgSize[0], subImgSize[1])
    d2d.SetFontSize(1)  # atom label font size
    dos = d2d.drawOptions()
    dos.legendFontSize=23  # legend font size
    dos.atomHighlightsAreCircles = False
    dos.fillHighlights = True
    atomHighlighs = defaultdict(list)
    highlightRads = {}
    for idx in highlight_predicted:
        atomHighlighs[idx].append(color_predicted)
        highlightRads[idx] = arad
    # did threshold find some predictions?
    # Keep only red sites that are not already highlighted green.
    highlight_loseicted = list(set(highlight_loseicted)-set(highlight_predicted))
    if len(highlight_loseicted):
        for idx in highlight_loseicted:
            atomHighlighs[idx].append(color_loseicted)
            highlightRads[idx] = arad
    if measure:
        for idx in measure:
            atomHighlighs[idx].append(color_measured)
            highlightRads[idx] = arad
    d2d.DrawMoleculeWithHighlights(mol, name, dict(atomHighlighs), {}, highlightRads, {})
    d2d.FinishDrawing()
    return d2d.GetDrawingText()
def generate_structure(ref_smi, smiles, names, predicted, highlight_measure=None):
    """Assemble one SVG grid (up to 4 molecules per row) from a list of SMILES.

    ``predicted`` is a pair (highlight_predicted, highlight_loseicted) of
    per-molecule atom-index lists; ``highlight_measure`` marks the measured
    atoms on every molecule.  Returns the complete SVG document as a string.
    """
    global subImgSize
    molsPerRow = 4
    highlight_predicted, highlight_loseicted = predicted
    if names == None:
        names = ['' for i in range(len(smiles))]
    # Grid geometry: ceil(len(smiles)/molsPerRow) rows; one row shrinks to fit.
    nRows = len(smiles) // molsPerRow
    if len(smiles) % molsPerRow:
        nRows += 1
    if nRows == 1:
        molsPerRow = len(smiles)
    fullSize = (molsPerRow * subImgSize[0], nRows * subImgSize[1])
    header = """<svg version='1.1' baseProfile='full'
xmlns='http://www.w3.org/2000/svg'
xmlns:rdkit='http://www.rdkit.org/xml'
xmlns:xlink='http://www.w3.org/1999/xlink'
xml:space='preserve'
width='{0}px' height='{1}px' viewBox='0 0 {0} {1}'>
<!-- END OF HEADER -->""".format(fullSize[0],fullSize[1])
    # Each per-molecule SVG body is translated into its grid cell.
    spacer = '<g transform="translate({0},{1})">\n{2}</g>'
    ### Make sure the atoms are in order ###
    mols = [Chem.MolFromSmiles(smi) for smi in smiles]
    mols = get_atoms_in_order(Chem.MolFromSmiles(ref_smi), mols)
    cwidth = 0
    cheight = 0
    drawed_mols = []
    for i in range(len(smiles)):
        res = draw2d(mols[i], names[i], subImgSize, highlight_predicted[i], highlight_loseicted[i], highlight_measure)
        # Strip the per-molecule SVG header (everything up to the sentinel
        # comment) and the trailing close tags so only the drawing body remains.
        res = res.split("\n")
        end_of_header = res.index("<!-- END OF HEADER -->") + 1
        res = "\n".join(res[end_of_header:-2])
        res = "".join(spacer.format(int(cwidth*subImgSize[0]), int(cheight*subImgSize[1]), res))
        drawed_mols.append(res)
        # Advance the grid cursor: wrap to the next row after molsPerRow cells.
        if int(i+1) % molsPerRow == 0 and i != 0:
            cheight += 1
            cwidth = 0
        elif molsPerRow == 1:
            cheight += 1
            cwidth = 0
        else:
            cwidth += 1
    svg = header + "\n" + "\n".join(drawed_mols) + "\n</svg>"
    return svg
if __name__ == "__main__":
    # Smoke test: two tautomers of a methyl-thiophene pyrazole.
    ref_smi = 'c1c(c2cc(sc2)C)n[nH]c1'
    smiles = ['c1c(-c2cc(C)sc2)[nH]nc1', 'c1c(-c2cc(C)sc2)n[nH]c1']
    names = ['taut1', 'taut2']
    highlight_predicted = [[7,0], [10]]
    highlight_loseicted = [[7], [10]]
    highlight_measure = [0]
    result_svg = generate_structure(ref_smi, smiles, names, [highlight_predicted, highlight_loseicted], highlight_measure=highlight_measure)
    # Context manager guarantees the handle is closed even if write() raises.
    with open('test.svg', 'w') as fd:
        fd.write(result_svg)
| en | 0.342577 | # from rdkit.Chem import rdDepictor # rdDepictor.SetPreferCoordGen(True) # Drawing Options # Green # Red # Black #0.25 #molsPerRow = 4 #change this in generate_structure() #atom label font size #legend font size # did threshold find some predictions? # find ones not in predicted list <svg version='1.1' baseProfile='full' xmlns='http://www.w3.org/2000/svg' xmlns:rdkit='http://www.rdkit.org/xml' xmlns:xlink='http://www.w3.org/1999/xlink' xml:space='preserve' width='{0}px' height='{1}px' viewBox='0 0 {0} {1}'> <!-- END OF HEADER --> ### Make sure the atoms are in order ### | 1.878946 | 2 |
app/views.py | Njoro410/minute-go | 0 | 6620037 | <reponame>Njoro410/minute-go<filename>app/views.py
from nis import cat
from unicodedata import category
from flask import Blueprint, render_template, abort, request, redirect, url_for
from flask_login import login_required, current_user
from app.forms import CommentForm, UpdateProfile
from app.models import Categories, Pitches, User, Comments
from . import db, photos
# Blueprint grouping all page routes; registered on the app by the package factory.
views = Blueprint('views', __name__)

@views.route('/')
def index():
    """Render the public landing page."""
    return render_template('index.html')
@views.route('/pitches', methods=['GET', 'POST'])
@login_required
def pitches():
    """List all pitches and accept new pitch submissions.

    GET renders the pitch feed; POST reads the submission form, stores the
    new pitch, and redirects back to the submitting page.
    """
    pitches = Pitches.get_pitches()
    if request.method == 'POST':
        title = request.form.get('title')
        content = request.form.get('pitch')
        category = request.form.get('catselect')
        # New pitches start with zero likes/dislikes; categories_id comes
        # from the category <select> element of the form.
        new_pitch = Pitches(title=title, content=content,
                            user_id=current_user.id, categories_id=category,likes = 0, dislikes = 0)
        new_pitch.save_pitch()
        # Redirect back to wherever the form was submitted from.
        return redirect(request.referrer)
    return render_template('pitches.html', user=current_user, pitches=pitches)
@views.route('/pitch/<int:id>', methods=['GET', 'POST'])
@login_required
def single_pitch(id):
    """Show one pitch with its comments; handle likes, dislikes and new comments.

    Likes/dislikes arrive as query-string flags (?like=... / ?dislike=...);
    new comments arrive via the POSTed CommentForm.
    """
    pitch = Pitches.query.get(id)
    coms = Comments.get_comments(id)
    if request.args.get("like"):
        # NOTE(review): voting via GET query params keeps no per-user record,
        # so repeated requests re-apply the vote — confirm that is intended.
        pitch.likes = pitch.likes + 1
        db.session.add(pitch)
        db.session.commit()
        # Redirect so a browser refresh does not resubmit the vote URL state.
        return redirect("/pitch/{pitch_id}".format(pitch_id=pitch.id))
    elif request.args.get("dislike"):
        pitch.dislikes = pitch.dislikes + 1
        db.session.add(pitch)
        db.session.commit()
        return redirect("/pitch/{pitch_id}".format(pitch_id=pitch.id))
    comment_form = CommentForm()
    if comment_form.validate_on_submit():
        comment = comment_form.content.data
        new_comment = Comments(
            comment=comment, user_id=current_user.id, pitch_id=id)
        new_comment.save_comment()
        return redirect(request.referrer)
    return render_template('pitch.html', user=current_user, pitch=pitch, comment_form=comment_form, comments=coms)
@views.route('/user/<uname>/<id>')
def profile(uname,id):
    """Show a user's public profile together with all pitches they posted."""
    user = User.query.filter_by(username=uname).first()
    pitch = Pitches.query.filter_by(user_id=id).all()
    if user is None:
        abort(404)
    return render_template("profile/profile.html", user=user,pitches = pitch)
@views.route('/user/<uname>/<id>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname,id):
    """Let a user edit their bio via the UpdateProfile form."""
    user = User.query.filter_by(username=uname,id=id).first()
    if user is None:
        abort(404)
    form = UpdateProfile()
    if form.validate_on_submit():
        user.bio = form.bio.data
        db.session.add(user)
        db.session.commit()
        # '.profile' resolves relative to this blueprint.
        return redirect(url_for('.profile', uname=user.username,id=user.id))
    return render_template('profile/update.html', form=form)
@views.route('/user/<uname>/<id>/update/pic', methods=['POST'])
@login_required
def update_pic(uname,id):
    """Handle a profile-picture upload and point the user at the stored file.

    Bug fix: the saved upload's filename was assigned but never interpolated
    into the stored path, so every profile pointed at the literal string
    'photos/(unknown)'.  The path now uses the name returned by photos.save().
    """
    user = User.query.filter_by(username=uname,id=id).first()
    if user is None:
        # Consistent with profile()/update_profile(): unknown user -> 404
        # instead of an AttributeError further down.
        abort(404)
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        path = f'photos/{filename}'
        user.profile_pic_path = path
        db.session.commit()
    return redirect(url_for('views.profile', uname=uname, id=id))
@views.route('/category/<id>')
@login_required
def category(id):
    """List every pitch filed under the given category."""
    pitch = Pitches.query.filter_by(categories_id=id).all()
    cat = Categories.query.filter_by(id=id).first()
    return render_template('category.html', pitches=pitch, category=cat)
| from nis import cat
from unicodedata import category
from flask import Blueprint, render_template, abort, request, redirect, url_for
from flask_login import login_required, current_user
from app.forms import CommentForm, UpdateProfile
from app.models import Categories, Pitches, User, Comments
from . import db, photos
# Blueprint grouping all page routes; registered on the app by the package factory.
views = Blueprint('views', __name__)

@views.route('/')
def index():
    """Render the public landing page."""
    return render_template('index.html')
@views.route('/pitches', methods=['GET', 'POST'])
@login_required
def pitches():
    """List all pitches and accept new pitch submissions.

    GET renders the pitch feed; POST reads the submission form, stores the
    new pitch, and redirects back to the submitting page.
    """
    pitches = Pitches.get_pitches()
    if request.method == 'POST':
        title = request.form.get('title')
        content = request.form.get('pitch')
        category = request.form.get('catselect')
        # New pitches start with zero likes/dislikes; categories_id comes
        # from the category <select> element of the form.
        new_pitch = Pitches(title=title, content=content,
                            user_id=current_user.id, categories_id=category,likes = 0, dislikes = 0)
        new_pitch.save_pitch()
        # Redirect back to wherever the form was submitted from.
        return redirect(request.referrer)
    return render_template('pitches.html', user=current_user, pitches=pitches)
@views.route('/pitch/<int:id>', methods=['GET', 'POST'])
@login_required
def single_pitch(id):
    """Show one pitch with its comments; handle likes, dislikes and new comments.

    Likes/dislikes arrive as query-string flags (?like=... / ?dislike=...);
    new comments arrive via the POSTed CommentForm.
    """
    pitch = Pitches.query.get(id)
    coms = Comments.get_comments(id)
    if request.args.get("like"):
        # NOTE(review): voting via GET query params keeps no per-user record,
        # so repeated requests re-apply the vote — confirm that is intended.
        pitch.likes = pitch.likes + 1
        db.session.add(pitch)
        db.session.commit()
        # Redirect so a browser refresh does not resubmit the vote URL state.
        return redirect("/pitch/{pitch_id}".format(pitch_id=pitch.id))
    elif request.args.get("dislike"):
        pitch.dislikes = pitch.dislikes + 1
        db.session.add(pitch)
        db.session.commit()
        return redirect("/pitch/{pitch_id}".format(pitch_id=pitch.id))
    comment_form = CommentForm()
    if comment_form.validate_on_submit():
        comment = comment_form.content.data
        new_comment = Comments(
            comment=comment, user_id=current_user.id, pitch_id=id)
        new_comment.save_comment()
        return redirect(request.referrer)
    return render_template('pitch.html', user=current_user, pitch=pitch, comment_form=comment_form, comments=coms)
@views.route('/user/<uname>/<id>')
def profile(uname,id):
    """Show a user's public profile together with all pitches they posted."""
    user = User.query.filter_by(username=uname).first()
    pitch = Pitches.query.filter_by(user_id=id).all()
    if user is None:
        abort(404)
    return render_template("profile/profile.html", user=user,pitches = pitch)
@views.route('/user/<uname>/<id>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname,id):
    """Let a user edit their bio via the UpdateProfile form."""
    user = User.query.filter_by(username=uname,id=id).first()
    if user is None:
        abort(404)
    form = UpdateProfile()
    if form.validate_on_submit():
        user.bio = form.bio.data
        db.session.add(user)
        db.session.commit()
        # '.profile' resolves relative to this blueprint.
        return redirect(url_for('.profile', uname=user.username,id=user.id))
    return render_template('profile/update.html', form=form)
@views.route('/user/<uname>/<id>/update/pic', methods=['POST'])
@login_required
def update_pic(uname,id):
    """Handle a profile-picture upload and point the user at the stored file.

    Bug fix: the saved upload's filename was assigned but never interpolated
    into the stored path, so every profile pointed at the literal string
    'photos/(unknown)'.  The path now uses the name returned by photos.save().
    """
    user = User.query.filter_by(username=uname,id=id).first()
    if user is None:
        # Consistent with profile()/update_profile(): unknown user -> 404
        # instead of an AttributeError further down.
        abort(404)
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        path = f'photos/{filename}'
        user.profile_pic_path = path
        db.session.commit()
    return redirect(url_for('views.profile', uname=uname, id=id))
@views.route('/category/<id>')
@login_required
def category(id):
pitch = Pitches.query.filter_by(categories_id=id).all()
cat = Categories.query.filter_by(id=id).first()
return render_template('category.html', pitches=pitch, category=cat) | none | 1 | 2.243571 | 2 | |
utils/blendersavevertscolor.py | teddydragoone/makehuman1.0.0alpha7 | 2 | 6620038 | # You may use, modify and redistribute this module under the terms of the GNU GPL.
"""
Save vertex Colors in Blender format.
=========================== ==================================================================
Project Name: **MakeHuman**
Module File Location: utils/blendersavevertscolor.py
Product Home Page: http://www.makehuman.org/
SourceForge Home Page: http://sourceforge.net/projects/makehuman/
Authors: (individual developers look into the AUTHORS file)
Copyright(c): MakeHuman Team 2001-2008
Licensing: GPL3 (see also http://makehuman.wiki.sourceforge.net/Licensing)
Coding Standards: See http://makehuman.wiki.sourceforge.net/DG_Coding_Standards
=========================== ==================================================================
This module implements a utility function to save vertex colors in Blender format.
"""
__docformat__ = 'restructuredtext'
import Blender
def saveVertsColors(path):
    """
    This function saves vertex colors in Blender format.

    One RGBA line (0-255 per channel) is written per triangle corner; quad
    faces are split into two triangles (corners 0-1-2 and 2-3-0).  Vertices
    belonging to the "alpha" vertex group are written with alpha 0, all
    others with alpha 255.  (Python 2 / Blender 2.4x API.)

    Parameters
    ----------

    path:
        *path*. The file system path to the file to be written.
    """
    # Operates on the first (active) selected object in the scene.
    activeObjs = Blender.Object.GetSelected()
    activeObj = activeObjs[0]
    me = activeObj.getData(mesh=True)
    me.vertexColors= True # Enable face, vertex colors
    # Meshes without an "alpha" group raise here; treat that as "no
    # transparent vertices".
    try:
        vertsAlpha = me.getVertsFromGroup("alpha")
    except:
        vertsAlpha = []
    try:
        fileDescriptor = open(path, "w")
    except:
        print "error to save obj file"
        return 0
    for f in me.faces:
        # Default: fully opaque corners; overridden per vertex below.
        alpha0 = 255
        alpha1 = 255
        alpha2 = 255
        alpha3 = 255
        if len(f.verts) == 4:
            #split quad colors in 2 trigs colors
            c0 = f.col[0]
            c1 = f.col[1]
            c2 = f.col[2]
            c3 = f.col[3]
            i0 = f.verts[0].index
            i1 = f.verts[1].index
            i2 = f.verts[2].index
            i3 = f.verts[3].index
            if i0 in vertsAlpha:
                alpha0 = 0
            if i1 in vertsAlpha:
                alpha1 = 0
            if i2 in vertsAlpha:
                alpha2 = 0
            if i3 in vertsAlpha:
                alpha3 = 0
            # Triangle 1: quad corners 0-1-2.
            fileDescriptor.write("%i %i %i %i " % (c0.r,c0.g,c0.b,alpha0))
            fileDescriptor.write("%i %i %i %i " % (c1.r,c1.g,c1.b,alpha1))
            fileDescriptor.write("%i %i %i %i\n" % (c2.r,c2.g,c2.b,alpha2))
            # Triangle 2: quad corners 2-3-0.
            fileDescriptor.write("%i %i %i %i " % (c2.r,c2.g,c2.b,alpha2))
            fileDescriptor.write("%i %i %i %i " % (c3.r,c3.g,c3.b,alpha3))
            fileDescriptor.write("%i %i %i %i\n" % (c0.r,c0.g,c0.b,alpha0))
        if len(f.verts) == 3:
            #split quad colors in 2 trigs colors
            c0 = f.col[0]
            c1 = f.col[1]
            c2 = f.col[2]
            i0 = f.verts[0].index
            i1 = f.verts[1].index
            i2 = f.verts[2].index
            if i0 in vertsAlpha:
                alpha0 = 0
            if i1 in vertsAlpha:
                alpha1 = 0
            if i2 in vertsAlpha:
                alpha2 = 0
            fileDescriptor.write("%i %i %i %i " % (c0.r,c0.g,c0.b,alpha0))
            fileDescriptor.write("%i %i %i %i " % (c1.r,c1.g,c1.b,alpha1))
            fileDescriptor.write("%i %i %i %i\n" % (c2.r,c2.g,c2.b,alpha2))
    fileDescriptor.close()
saveVertsColors("/home/manuel/myworks/makehuman/data/3dobjs/upperbar.obj.colors")
| # You may use, modify and redistribute this module under the terms of the GNU GPL.
"""
Save vertex Colors in Blender format.
=========================== ==================================================================
Project Name: **MakeHuman**
Module File Location: utils/blendersavevertscolor.py
Product Home Page: http://www.makehuman.org/
SourceForge Home Page: http://sourceforge.net/projects/makehuman/
Authors: (individual developers look into the AUTHORS file)
Copyright(c): MakeHuman Team 2001-2008
Licensing: GPL3 (see also http://makehuman.wiki.sourceforge.net/Licensing)
Coding Standards: See http://makehuman.wiki.sourceforge.net/DG_Coding_Standards
=========================== ==================================================================
This module implements a utility function to save vertex colors in Blender format.
"""
__docformat__ = 'restructuredtext'
import Blender
def saveVertsColors(path):
    """
    This function saves vertex colors in Blender format.

    One RGBA line (0-255 per channel) is written per triangle corner; quad
    faces are split into two triangles (corners 0-1-2 and 2-3-0).  Vertices
    belonging to the "alpha" vertex group are written with alpha 0, all
    others with alpha 255.  (Python 2 / Blender 2.4x API.)

    Parameters
    ----------

    path:
        *path*. The file system path to the file to be written.
    """
    # Operates on the first (active) selected object in the scene.
    activeObjs = Blender.Object.GetSelected()
    activeObj = activeObjs[0]
    me = activeObj.getData(mesh=True)
    me.vertexColors= True # Enable face, vertex colors
    # Meshes without an "alpha" group raise here; treat that as "no
    # transparent vertices".
    try:
        vertsAlpha = me.getVertsFromGroup("alpha")
    except:
        vertsAlpha = []
    try:
        fileDescriptor = open(path, "w")
    except:
        print "error to save obj file"
        return 0
    for f in me.faces:
        # Default: fully opaque corners; overridden per vertex below.
        alpha0 = 255
        alpha1 = 255
        alpha2 = 255
        alpha3 = 255
        if len(f.verts) == 4:
            #split quad colors in 2 trigs colors
            c0 = f.col[0]
            c1 = f.col[1]
            c2 = f.col[2]
            c3 = f.col[3]
            i0 = f.verts[0].index
            i1 = f.verts[1].index
            i2 = f.verts[2].index
            i3 = f.verts[3].index
            if i0 in vertsAlpha:
                alpha0 = 0
            if i1 in vertsAlpha:
                alpha1 = 0
            if i2 in vertsAlpha:
                alpha2 = 0
            if i3 in vertsAlpha:
                alpha3 = 0
            # Triangle 1: quad corners 0-1-2.
            fileDescriptor.write("%i %i %i %i " % (c0.r,c0.g,c0.b,alpha0))
            fileDescriptor.write("%i %i %i %i " % (c1.r,c1.g,c1.b,alpha1))
            fileDescriptor.write("%i %i %i %i\n" % (c2.r,c2.g,c2.b,alpha2))
            # Triangle 2: quad corners 2-3-0.
            fileDescriptor.write("%i %i %i %i " % (c2.r,c2.g,c2.b,alpha2))
            fileDescriptor.write("%i %i %i %i " % (c3.r,c3.g,c3.b,alpha3))
            fileDescriptor.write("%i %i %i %i\n" % (c0.r,c0.g,c0.b,alpha0))
        if len(f.verts) == 3:
            #split quad colors in 2 trigs colors
            c0 = f.col[0]
            c1 = f.col[1]
            c2 = f.col[2]
            i0 = f.verts[0].index
            i1 = f.verts[1].index
            i2 = f.verts[2].index
            if i0 in vertsAlpha:
                alpha0 = 0
            if i1 in vertsAlpha:
                alpha1 = 0
            if i2 in vertsAlpha:
                alpha2 = 0
            fileDescriptor.write("%i %i %i %i " % (c0.r,c0.g,c0.b,alpha0))
            fileDescriptor.write("%i %i %i %i " % (c1.r,c1.g,c1.b,alpha1))
            fileDescriptor.write("%i %i %i %i\n" % (c2.r,c2.g,c2.b,alpha2))
    fileDescriptor.close()
saveVertsColors("/home/manuel/myworks/makehuman/data/3dobjs/upperbar.obj.colors")
| en | 0.533547 | # You may use, modify and redistribute this module under the terms of the GNU GPL. Save vertex Colors in Blender format. =========================== ================================================================== Project Name: **MakeHuman** Module File Location: utils/blendersavevertscolor.py Product Home Page: http://www.makehuman.org/ SourceForge Home Page: http://sourceforge.net/projects/makehuman/ Authors: (individual developers look into the AUTHORS file) Copyright(c): MakeHuman Team 2001-2008 Licensing: GPL3 (see also http://makehuman.wiki.sourceforge.net/Licensing) Coding Standards: See http://makehuman.wiki.sourceforge.net/DG_Coding_Standards =========================== ================================================================== This module implements a utility function to save vertex colors in Blender format. This function saves vertex colors in Blender format. Parameters ---------- path: *path*. The file system path to the file to be written. # Enable face, vertex colors #split quad colors in 2 trigs colors #split quad colors in 2 trigs colors | 1.97713 | 2 |
src/ackermann_controller/ackermann_controller/ackermann_teleop_joy.py | Aposhian/ackermann-odometry | 5 | 6620039 | <reponame>Aposhian/ackermann-odometry<gh_stars>1-10
import rclpy
from ackermann_msgs.msg import AckermannDrive, AckermannDriveStamped
from rclpy.node import Node
from sensor_msgs.msg import Joy
class AckermannTeleopJoy(Node):
"""Publishes ackermann control by joystick"""
def __init__(self):
super().__init__('ackermann_teleop_joy')
self.declare_parameters(
namespace='',
parameters=[
('max_steering_angle', 3.14 / 2),
('max_speed', 1.0)
]
)
self.max_steering_angle = float(self.get_parameter('max_steering_angle').value)
self.max_speed = float(self.get_parameter('max_speed').value)
self.create_subscription(
Joy,
'joy',
self.joy_callback,
10
)
self.publisher = self.create_publisher(AckermannDriveStamped, 'ackermann_cmd', 10)
def joy_callback(self, msg: Joy):
speed = self.max_speed * msg.axes[1]
steering_direction = 1 if speed > 0 else -1
self.publisher.publish(AckermannDriveStamped(
header=msg.header,
drive=AckermannDrive(
steering_angle=steering_direction * self.max_steering_angle * msg.axes[2],
speed=speed
)
))
def main(args=None):
rclpy.init(args=args)
ackermann_teleop_joy = AckermannTeleopJoy()
rclpy.spin(ackermann_teleop_joy)
ackermann_teleop_joy.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| import rclpy
from ackermann_msgs.msg import AckermannDrive, AckermannDriveStamped
from rclpy.node import Node
from sensor_msgs.msg import Joy
class AckermannTeleopJoy(Node):
"""Publishes ackermann control by joystick"""
def __init__(self):
super().__init__('ackermann_teleop_joy')
self.declare_parameters(
namespace='',
parameters=[
('max_steering_angle', 3.14 / 2),
('max_speed', 1.0)
]
)
self.max_steering_angle = float(self.get_parameter('max_steering_angle').value)
self.max_speed = float(self.get_parameter('max_speed').value)
self.create_subscription(
Joy,
'joy',
self.joy_callback,
10
)
self.publisher = self.create_publisher(AckermannDriveStamped, 'ackermann_cmd', 10)
def joy_callback(self, msg: Joy):
speed = self.max_speed * msg.axes[1]
steering_direction = 1 if speed > 0 else -1
self.publisher.publish(AckermannDriveStamped(
header=msg.header,
drive=AckermannDrive(
steering_angle=steering_direction * self.max_steering_angle * msg.axes[2],
speed=speed
)
))
def main(args=None):
rclpy.init(args=args)
ackermann_teleop_joy = AckermannTeleopJoy()
rclpy.spin(ackermann_teleop_joy)
ackermann_teleop_joy.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() | en | 0.909577 | Publishes ackermann control by joystick | 2.540075 | 3 |
policosm/geoFunctions/linestrings_operation.py | ComplexCity/policosm | 6 | 6620040 | <reponame>ComplexCity/policosm
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created in March 2020 in ComplexCity Lab
@author: github.com/fpfaende
"""
import numpy as np
from scipy.spatial import ConvexHull
from shapely.geometry import LineString, Point, Polygon
from shapely.ops import unary_union
def join_linestrings(linestrings: list) -> LineString:
r"""
Simplify a list of n LineString [LineString a, ...,LineString n].
if the list is empty, function returns None
if the list had one element, it returns this element
if the list has several segments but disjoint function returns None
otherwise it create a new linestring from the successive segments
Returns
-------
LineString
a LineString joining all LineString segments
"""
if len(linestrings) == 0:
return None
elif len(linestrings) == 1:
return linestrings[0]
simple_line = []
for i in range(len(linestrings) - 1):
if linestrings[i].coords[-1] != linestrings[i + 1].coords[0]:
return None
simple_line += list(linestrings[i].coords[:-1])
simple_line += list(linestrings[i + 1].coords)
return LineString(simple_line)
def cut_linestring(line: LineString, distance: float) -> list:
r"""
Cuts a line in two at a distance from its starting point
courtesy of shapely doc
Parameters
----------
:param line : LineString to cut
:param distance : float distance to cut
Returns
-------
LineString : list
list of LineString
"""
if distance <= 0.0 or distance >= line.length:
return [LineString(line)]
coords = list(line.coords)
for i, p in enumerate(coords):
pd = line.project(Point(p))
if pd == distance:
return [LineString(coords[:i + 1]), LineString(coords[i:])]
if pd > distance:
cp = line.interpolate(distance)
return [LineString(coords[:i] + [(cp.x, cp.y)]), LineString([(cp.x, cp.y)] + coords[i:])]
def asymmetric_segment_buffer(a: Point, b: Point, a_buffer: float, b_buffer: float) -> Polygon:
r"""
create an asymmetric polygonal buffer around a segment a––b
Parameters
----------
:param a : shapely Point
:param b : shapely Point
:param a_buffer : float, buffered value around a
:param b_buffer : float, buffered value around b
Returns
-------
Polygon :
buffered segment
"""
if a_buffer > 0:
a = a.buffer(a_buffer)
a = np.ravel(np.array(a.exterior.coords.xy), order='F')
else:
a = np.ravel(np.array(a.xy), order='F')
if b_buffer > 0:
b = b.buffer(b_buffer)
b = np.ravel(np.array(b.exterior.coords.xy), order='F')
else:
b = np.ravel(np.array(b.xy), order='F')
h = np.concatenate((a, b), axis=None)
h = np.reshape(h, (-1, 2))
hull = ConvexHull(h)
xs, ys = h[hull.vertices, 0], h[hull.vertices, 1]
return Polygon(zip(xs, ys))
def asymmetric_line_buffer(line: LineString, start_value: float, end_value: float) -> Polygon:
r"""
create an asymmetric polygonal buffer around a line made of one or more segment a––•––•–––––b
it splits the line into segments, interpolate the buffers value between start and end and make a union of polygons around it
Parameters
----------
:param line: a LineString
:param start_value: a float representing distance from start of the line
:param end_value: a float representing distance from end of the line
Returns
-------
Polygon :
unionized asymmetric buffered segments of a line
"""
if start_value == end_value:
return line.buffer(start_value)
coords = list(line.coords)
dists = [0] + [LineString(line.coords[:i + 1]).length for i in range(1, len(coords) - 1)] + [line.length]
buffers = np.interp(dists, [0, line.length], [start_value, end_value])
polygons = []
for i in range(len(coords) - 1):
polygons.append(asymmetric_segment_buffer(Point(coords[i]), Point(coords[i + 1]), buffers[i], buffers[i + 1]))
return unary_union(polygons)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created in March 2020 in ComplexCity Lab
@author: github.com/fpfaende
"""
import numpy as np
from scipy.spatial import ConvexHull
from shapely.geometry import LineString, Point, Polygon
from shapely.ops import unary_union
def join_linestrings(linestrings: list) -> LineString:
r"""
Simplify a list of n LineString [LineString a, ...,LineString n].
if the list is empty, function returns None
if the list had one element, it returns this element
if the list has several segments but disjoint function returns None
otherwise it create a new linestring from the successive segments
Returns
-------
LineString
a LineString joining all LineString segments
"""
if len(linestrings) == 0:
return None
elif len(linestrings) == 1:
return linestrings[0]
simple_line = []
for i in range(len(linestrings) - 1):
if linestrings[i].coords[-1] != linestrings[i + 1].coords[0]:
return None
simple_line += list(linestrings[i].coords[:-1])
simple_line += list(linestrings[i + 1].coords)
return LineString(simple_line)
def cut_linestring(line: LineString, distance: float) -> list:
r"""
Cuts a line in two at a distance from its starting point
courtesy of shapely doc
Parameters
----------
:param line : LineString to cut
:param distance : float distance to cut
Returns
-------
LineString : list
list of LineString
"""
if distance <= 0.0 or distance >= line.length:
return [LineString(line)]
coords = list(line.coords)
for i, p in enumerate(coords):
pd = line.project(Point(p))
if pd == distance:
return [LineString(coords[:i + 1]), LineString(coords[i:])]
if pd > distance:
cp = line.interpolate(distance)
return [LineString(coords[:i] + [(cp.x, cp.y)]), LineString([(cp.x, cp.y)] + coords[i:])]
def asymmetric_segment_buffer(a: Point, b: Point, a_buffer: float, b_buffer: float) -> Polygon:
r"""
create an asymmetric polygonal buffer around a segment a––b
Parameters
----------
:param a : shapely Point
:param b : shapely Point
:param a_buffer : float, buffered value around a
:param b_buffer : float, buffered value around b
Returns
-------
Polygon :
buffered segment
"""
if a_buffer > 0:
a = a.buffer(a_buffer)
a = np.ravel(np.array(a.exterior.coords.xy), order='F')
else:
a = np.ravel(np.array(a.xy), order='F')
if b_buffer > 0:
b = b.buffer(b_buffer)
b = np.ravel(np.array(b.exterior.coords.xy), order='F')
else:
b = np.ravel(np.array(b.xy), order='F')
h = np.concatenate((a, b), axis=None)
h = np.reshape(h, (-1, 2))
hull = ConvexHull(h)
xs, ys = h[hull.vertices, 0], h[hull.vertices, 1]
return Polygon(zip(xs, ys))
def asymmetric_line_buffer(line: LineString, start_value: float, end_value: float) -> Polygon:
r"""
create an asymmetric polygonal buffer around a line made of one or more segment a––•––•–––––b
it splits the line into segments, interpolate the buffers value between start and end and make a union of polygons around it
Parameters
----------
:param line: a LineString
:param start_value: a float representing distance from start of the line
:param end_value: a float representing distance from end of the line
Returns
-------
Polygon :
unionized asymmetric buffered segments of a line
"""
if start_value == end_value:
return line.buffer(start_value)
coords = list(line.coords)
dists = [0] + [LineString(line.coords[:i + 1]).length for i in range(1, len(coords) - 1)] + [line.length]
buffers = np.interp(dists, [0, line.length], [start_value, end_value])
polygons = []
for i in range(len(coords) - 1):
polygons.append(asymmetric_segment_buffer(Point(coords[i]), Point(coords[i + 1]), buffers[i], buffers[i + 1]))
return unary_union(polygons) | en | 0.744121 | #!/usr/bin/python # -*- coding: utf-8 -*- Created in March 2020 in ComplexCity Lab @author: github.com/fpfaende Simplify a list of n LineString [LineString a, ...,LineString n]. if the list is empty, function returns None if the list had one element, it returns this element if the list has several segments but disjoint function returns None otherwise it create a new linestring from the successive segments Returns ------- LineString a LineString joining all LineString segments Cuts a line in two at a distance from its starting point courtesy of shapely doc Parameters ---------- :param line : LineString to cut :param distance : float distance to cut Returns ------- LineString : list list of LineString create an asymmetric polygonal buffer around a segment a––b Parameters ---------- :param a : shapely Point :param b : shapely Point :param a_buffer : float, buffered value around a :param b_buffer : float, buffered value around b Returns ------- Polygon : buffered segment create an asymmetric polygonal buffer around a line made of one or more segment a––•––•–––––b it splits the line into segments, interpolate the buffers value between start and end and make a union of polygons around it Parameters ---------- :param line: a LineString :param start_value: a float representing distance from start of the line :param end_value: a float representing distance from end of the line Returns ------- Polygon : unionized asymmetric buffered segments of a line | 2.763469 | 3 |
datasets/QuickTrackletDetMatchDatasetPB.py | icicle4/TranSTAM | 0 | 6620041 | <gh_stars>0
import os
import sys
from torch.utils.data import Dataset
import random
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(__file__), "../..")))
from datasets.memmory_bank_pb import MemoryBank
from datasets.warp_tracklet_and_detection_tensor import *
from models.StrictSimpleCaseTracker import ImpossibleTracker
from utils.metrics import AvgMetric
def collate_fn(batch):
batch_track_nums = [item["track_num"] for item in batch]
batch_det_nums = [item["det_num"] for item in batch]
max_track_num = max(batch_track_nums)
max_det_num = max(batch_det_nums)
batch_tracks = merge_batch_tracklets_tensors(batch, max_track_num)
batch_dets = merge_batch_detections_tensors(batch, max_det_num)
batch_det_mask = torch.zeros((len(batch), max_det_num), dtype=torch.long)
for i, det_num in enumerate(batch_det_nums):
batch_det_mask[i, :det_num] = 1
labels = [item["labels"] for item in batch]
final_res = {}
final_res.update(batch_tracks)
final_res.update(batch_dets)
final_res.update(
{
"labels": labels,
"det_mask": batch_det_mask,
"track_num": torch.from_numpy(np.asarray(batch_track_nums, dtype=np.int32)),
"det_num": torch.from_numpy(np.asarray(batch_det_nums, dtype=np.int32))
}
)
return final_res
class TrackDetMatchDatasetPublic(Dataset):
def __init__(self, stage="train", root_dir=".", tracklet_sample_region_length=5, drop_simple_case=False,
cache_window=20, threshold=3.0):
self.drop_simple_case = False
self.cache_window = cache_window
if drop_simple_case:
self.drop_simple_case = True
self.impossible_tracker = ImpossibleTracker(threshold)
else:
self.impossible_tracker = None
super(TrackDetMatchDatasetPublic, self).__init__()
self.memory_bank = MemoryBank(root_dir)
self.stage = stage
self.memory_bank.load_pbs_to_memory_bank(phase=stage)
self.memory_bank.build_memmap_memory()
self.tracklet_sample_region_length = tracklet_sample_region_length
self.sample_start_frames = []
for video_id in self.memory_bank.video_names:
video_frames = self.memory_bank.video_frames[video_id]
sorted_video_frames = sorted(list(video_frames))
for video_frame in sorted_video_frames[:-tracklet_sample_region_length]:
end_frame = video_frame + self.tracklet_sample_region_length
track_ids = self.memory_bank.get_track_ids_in_frame(video_id, end_frame)
if len(track_ids) > 0:
self.sample_start_frames.append([video_id, video_frame])
print("build dataset done, total sample num is : {}".format(len(self.sample_start_frames)))
def __len__(self):
return len(self.sample_start_frames)
def __getitem__(self, item):
video_id, start_frame = self.sample_start_frames[item]
end_frame = start_frame + self.tracklet_sample_region_length
sampled_frames = list(range(start_frame, end_frame))
video_width, video_height = self.memory_bank.get_video_width_and_height(video_id)
tracklets = defaultdict(dict)
for f in sampled_frames:
track_and_det_ids = self.memory_bank.get_track_ids_in_frame(video_id, f)
for track_id, det_idx in track_and_det_ids:
detection = self.memory_bank.query_detection(video_id, track_id, det_idx)
detection = normalize_det_xywh_with_video_wh(detection, video_width, video_height)
tracklets[track_id][f] = detection
total_tracklets_ids = list(tracklets.keys())
sample_detection_frame = end_frame
detections = defaultdict()
track_and_det_ids = self.memory_bank.get_track_ids_in_frame(video_id, sample_detection_frame)
for track_id, det_idx in track_and_det_ids:
detection = self.memory_bank.query_detection(video_id, track_id, det_idx)
detection["frame"] = sample_detection_frame - start_frame
detection = normalize_det_xywh_with_video_wh(detection, video_width, video_height)
detections[track_id] = detection
filled_tracklets = []
random.shuffle(total_tracklets_ids)
for track_id in total_tracklets_ids:
filled_tracklets.append(
convet_tracklet_to_tensor(tracklets[track_id], sampled_frames, self.cache_window, track_id=track_id)
)
tracklets_tensors = merge_tracklet_into_tensors(filled_tracklets)
track_num = len(total_tracklets_ids)
detection_ids = list(detections.keys())
det_num = len(detection_ids)
random.shuffle(detection_ids)
detections = [detections[id] for id in detection_ids]
detections_tensor = convert_detections_to_tensor(detections)
if self.drop_simple_case:
impossible_mask = self.impossible_tracker.forward(tracklets_tensors, detections_tensor,
sample_window_size=self.tracklet_sample_region_length)
label_mask = torch.zeros_like(impossible_mask)
for i, detection_id in enumerate(detection_ids):
if detection_id in total_tracklets_ids:
j = total_tracklets_ids.index(detection_id)
label_mask[0, i, j] = True
impossible_mask = torch.logical_or(label_mask, impossible_mask)
else:
impossible_mask = None
labels = []
for detection_id in detection_ids:
if detection_id in total_tracklets_ids:
label = total_tracklets_ids.index(detection_id)
labels.append(label)
else:
labels.append(-1)
res = {
"track_num": track_num,
"det_num": det_num,
"tracks": tracklets_tensors,
"detections": detections_tensor,
"labels": labels,
"impossible_mask": impossible_mask
}
return res
def build_dataset(stage, args):
dataset = TrackDetMatchDatasetPublic(stage=stage,
root_dir=args.dataset_dir,
tracklet_sample_region_length=args.track_history_len
)
return dataset
if __name__ == '__main__':
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_dir", type=str, required=True)
args = parser.parse_args()
time1 = time.time()
simple_dataset = TrackDetMatchDatasetPublic(stage="train",
root_dir=args.dataset_dir,
tracklet_sample_region_length=20,
drop_simple_case=True)
tracklet_num_avg_metric = AvgMetric()
det_num_avg_metric = AvgMetric()
raw_track_num_avg_metric = AvgMetric()
raw_det_num_avg_metric = AvgMetric()
tp_num_avg_metric = AvgMetric()
fp_num_avg_metric = AvgMetric()
track_mul_det_num_avg_metric = AvgMetric()
for i, sample in enumerate(simple_dataset):
tracklet_num_avg_metric.update(sample['track_num'])
det_num_avg_metric.update(sample['det_num'])
track_mul_det_num_avg_metric.update(sample['track_num'] * sample['det_num'])
tp_num_avg_metric.update(sample["tp_num"])
fp_num_avg_metric.update(sample["fp_num"])
raw_track_num_avg_metric.update(sample["raw_track_num"])
raw_det_num_avg_metric.update(sample["raw_det_num"])
if i % 1000 == 0:
print('total track num: {}, avg track num per graph: {}, max track num: {}'.format(tracklet_num_avg_metric.total,
tracklet_num_avg_metric.avg,
tracklet_num_avg_metric.max))
print('total det num: {}, avg det num per graph: {}, max det num: {}'.format(det_num_avg_metric.total,
det_num_avg_metric.avg,
det_num_avg_metric.max))
print('total tp num: {}, avg tp num per graph: {}'.format(tp_num_avg_metric.total, tp_num_avg_metric.avg))
print('total fp num: {}, avg fp num per graph: {}'.format(fp_num_avg_metric.total, fp_num_avg_metric.avg))
print('raw track num: {}, avg track num per graph: {}, max track num: {}'.format(
raw_track_num_avg_metric.total,
raw_track_num_avg_metric.avg,
raw_track_num_avg_metric.max))
print('raw det num: {}, avg det num per graph: {}, max det num: {}'.format(raw_det_num_avg_metric.total,
raw_det_num_avg_metric.avg,
raw_det_num_avg_metric.max))
print('track_mul_det num: {}, avg per graph: {}, max num: {}'.format(track_mul_det_num_avg_metric.total,
track_mul_det_num_avg_metric.avg,
track_mul_det_num_avg_metric.max))
time2 = time.time()
print("Build dataset and loop one epoch cost time: {}".format(time2 - time1))
print('total track num: {}, avg track num per graph: {}, max track num: {}'.format(tracklet_num_avg_metric.total,
tracklet_num_avg_metric.avg,
tracklet_num_avg_metric.max))
print('total det num: {}, avg det num per graph: {}, max det num: {}'.format(det_num_avg_metric.total,
det_num_avg_metric.avg,
det_num_avg_metric.max))
print('raw track num: {}, avg track num per graph: {}, max track num: {}'.format(
raw_track_num_avg_metric.total,
raw_track_num_avg_metric.avg,
raw_track_num_avg_metric.max))
print('raw det num: {}, avg det num per graph: {}, max det num: {}'.format(raw_det_num_avg_metric.total,
raw_det_num_avg_metric.avg,
raw_det_num_avg_metric.max))
print('track_mul_det num: {}, avg per graph: {}, max num: {}'.format(track_mul_det_num_avg_metric.total,
track_mul_det_num_avg_metric.avg,
track_mul_det_num_avg_metric.max))
print('Simple Tracker Performance, precison: {}, recall: {}'.format(
tp_num_avg_metric.total / (tp_num_avg_metric.total + fp_num_avg_metric.total),
(tp_num_avg_metric.total + fp_num_avg_metric.total) / (tp_num_avg_metric.total + fp_num_avg_metric.total + det_num_avg_metric.total)
))
| import os
import sys
from torch.utils.data import Dataset
import random
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(__file__), "../..")))
from datasets.memmory_bank_pb import MemoryBank
from datasets.warp_tracklet_and_detection_tensor import *
from models.StrictSimpleCaseTracker import ImpossibleTracker
from utils.metrics import AvgMetric
def collate_fn(batch):
batch_track_nums = [item["track_num"] for item in batch]
batch_det_nums = [item["det_num"] for item in batch]
max_track_num = max(batch_track_nums)
max_det_num = max(batch_det_nums)
batch_tracks = merge_batch_tracklets_tensors(batch, max_track_num)
batch_dets = merge_batch_detections_tensors(batch, max_det_num)
batch_det_mask = torch.zeros((len(batch), max_det_num), dtype=torch.long)
for i, det_num in enumerate(batch_det_nums):
batch_det_mask[i, :det_num] = 1
labels = [item["labels"] for item in batch]
final_res = {}
final_res.update(batch_tracks)
final_res.update(batch_dets)
final_res.update(
{
"labels": labels,
"det_mask": batch_det_mask,
"track_num": torch.from_numpy(np.asarray(batch_track_nums, dtype=np.int32)),
"det_num": torch.from_numpy(np.asarray(batch_det_nums, dtype=np.int32))
}
)
return final_res
class TrackDetMatchDatasetPublic(Dataset):
def __init__(self, stage="train", root_dir=".", tracklet_sample_region_length=5, drop_simple_case=False,
cache_window=20, threshold=3.0):
self.drop_simple_case = False
self.cache_window = cache_window
if drop_simple_case:
self.drop_simple_case = True
self.impossible_tracker = ImpossibleTracker(threshold)
else:
self.impossible_tracker = None
super(TrackDetMatchDatasetPublic, self).__init__()
self.memory_bank = MemoryBank(root_dir)
self.stage = stage
self.memory_bank.load_pbs_to_memory_bank(phase=stage)
self.memory_bank.build_memmap_memory()
self.tracklet_sample_region_length = tracklet_sample_region_length
self.sample_start_frames = []
for video_id in self.memory_bank.video_names:
video_frames = self.memory_bank.video_frames[video_id]
sorted_video_frames = sorted(list(video_frames))
for video_frame in sorted_video_frames[:-tracklet_sample_region_length]:
end_frame = video_frame + self.tracklet_sample_region_length
track_ids = self.memory_bank.get_track_ids_in_frame(video_id, end_frame)
if len(track_ids) > 0:
self.sample_start_frames.append([video_id, video_frame])
print("build dataset done, total sample num is : {}".format(len(self.sample_start_frames)))
def __len__(self):
return len(self.sample_start_frames)
def __getitem__(self, item):
video_id, start_frame = self.sample_start_frames[item]
end_frame = start_frame + self.tracklet_sample_region_length
sampled_frames = list(range(start_frame, end_frame))
video_width, video_height = self.memory_bank.get_video_width_and_height(video_id)
tracklets = defaultdict(dict)
for f in sampled_frames:
track_and_det_ids = self.memory_bank.get_track_ids_in_frame(video_id, f)
for track_id, det_idx in track_and_det_ids:
detection = self.memory_bank.query_detection(video_id, track_id, det_idx)
detection = normalize_det_xywh_with_video_wh(detection, video_width, video_height)
tracklets[track_id][f] = detection
total_tracklets_ids = list(tracklets.keys())
sample_detection_frame = end_frame
detections = defaultdict()
track_and_det_ids = self.memory_bank.get_track_ids_in_frame(video_id, sample_detection_frame)
for track_id, det_idx in track_and_det_ids:
detection = self.memory_bank.query_detection(video_id, track_id, det_idx)
detection["frame"] = sample_detection_frame - start_frame
detection = normalize_det_xywh_with_video_wh(detection, video_width, video_height)
detections[track_id] = detection
filled_tracklets = []
random.shuffle(total_tracklets_ids)
for track_id in total_tracklets_ids:
filled_tracklets.append(
convet_tracklet_to_tensor(tracklets[track_id], sampled_frames, self.cache_window, track_id=track_id)
)
tracklets_tensors = merge_tracklet_into_tensors(filled_tracklets)
track_num = len(total_tracklets_ids)
detection_ids = list(detections.keys())
det_num = len(detection_ids)
random.shuffle(detection_ids)
detections = [detections[id] for id in detection_ids]
detections_tensor = convert_detections_to_tensor(detections)
if self.drop_simple_case:
impossible_mask = self.impossible_tracker.forward(tracklets_tensors, detections_tensor,
sample_window_size=self.tracklet_sample_region_length)
label_mask = torch.zeros_like(impossible_mask)
for i, detection_id in enumerate(detection_ids):
if detection_id in total_tracklets_ids:
j = total_tracklets_ids.index(detection_id)
label_mask[0, i, j] = True
impossible_mask = torch.logical_or(label_mask, impossible_mask)
else:
impossible_mask = None
labels = []
for detection_id in detection_ids:
if detection_id in total_tracklets_ids:
label = total_tracklets_ids.index(detection_id)
labels.append(label)
else:
labels.append(-1)
res = {
"track_num": track_num,
"det_num": det_num,
"tracks": tracklets_tensors,
"detections": detections_tensor,
"labels": labels,
"impossible_mask": impossible_mask
}
return res
def build_dataset(stage, args):
dataset = TrackDetMatchDatasetPublic(stage=stage,
root_dir=args.dataset_dir,
tracklet_sample_region_length=args.track_history_len
)
return dataset
if __name__ == '__main__':
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_dir", type=str, required=True)
args = parser.parse_args()
time1 = time.time()
simple_dataset = TrackDetMatchDatasetPublic(stage="train",
root_dir=args.dataset_dir,
tracklet_sample_region_length=20,
drop_simple_case=True)
tracklet_num_avg_metric = AvgMetric()
det_num_avg_metric = AvgMetric()
raw_track_num_avg_metric = AvgMetric()
raw_det_num_avg_metric = AvgMetric()
tp_num_avg_metric = AvgMetric()
fp_num_avg_metric = AvgMetric()
track_mul_det_num_avg_metric = AvgMetric()
for i, sample in enumerate(simple_dataset):
tracklet_num_avg_metric.update(sample['track_num'])
det_num_avg_metric.update(sample['det_num'])
track_mul_det_num_avg_metric.update(sample['track_num'] * sample['det_num'])
tp_num_avg_metric.update(sample["tp_num"])
fp_num_avg_metric.update(sample["fp_num"])
raw_track_num_avg_metric.update(sample["raw_track_num"])
raw_det_num_avg_metric.update(sample["raw_det_num"])
if i % 1000 == 0:
print('total track num: {}, avg track num per graph: {}, max track num: {}'.format(tracklet_num_avg_metric.total,
tracklet_num_avg_metric.avg,
tracklet_num_avg_metric.max))
print('total det num: {}, avg det num per graph: {}, max det num: {}'.format(det_num_avg_metric.total,
det_num_avg_metric.avg,
det_num_avg_metric.max))
print('total tp num: {}, avg tp num per graph: {}'.format(tp_num_avg_metric.total, tp_num_avg_metric.avg))
print('total fp num: {}, avg fp num per graph: {}'.format(fp_num_avg_metric.total, fp_num_avg_metric.avg))
print('raw track num: {}, avg track num per graph: {}, max track num: {}'.format(
raw_track_num_avg_metric.total,
raw_track_num_avg_metric.avg,
raw_track_num_avg_metric.max))
print('raw det num: {}, avg det num per graph: {}, max det num: {}'.format(raw_det_num_avg_metric.total,
raw_det_num_avg_metric.avg,
raw_det_num_avg_metric.max))
print('track_mul_det num: {}, avg per graph: {}, max num: {}'.format(track_mul_det_num_avg_metric.total,
track_mul_det_num_avg_metric.avg,
track_mul_det_num_avg_metric.max))
time2 = time.time()
print("Build dataset and loop one epoch cost time: {}".format(time2 - time1))
print('total track num: {}, avg track num per graph: {}, max track num: {}'.format(tracklet_num_avg_metric.total,
tracklet_num_avg_metric.avg,
tracklet_num_avg_metric.max))
print('total det num: {}, avg det num per graph: {}, max det num: {}'.format(det_num_avg_metric.total,
det_num_avg_metric.avg,
det_num_avg_metric.max))
print('raw track num: {}, avg track num per graph: {}, max track num: {}'.format(
raw_track_num_avg_metric.total,
raw_track_num_avg_metric.avg,
raw_track_num_avg_metric.max))
print('raw det num: {}, avg det num per graph: {}, max det num: {}'.format(raw_det_num_avg_metric.total,
raw_det_num_avg_metric.avg,
raw_det_num_avg_metric.max))
print('track_mul_det num: {}, avg per graph: {}, max num: {}'.format(track_mul_det_num_avg_metric.total,
track_mul_det_num_avg_metric.avg,
track_mul_det_num_avg_metric.max))
print('Simple Tracker Performance, precison: {}, recall: {}'.format(
tp_num_avg_metric.total / (tp_num_avg_metric.total + fp_num_avg_metric.total),
(tp_num_avg_metric.total + fp_num_avg_metric.total) / (tp_num_avg_metric.total + fp_num_avg_metric.total + det_num_avg_metric.total)
)) | none | 1 | 1.932335 | 2 | |
recipes/Python/577601_A_MSSQL_XML_importer_for_MySQL/recipe-577601.py | tdiprima/code | 2,023 | 6620042 | <filename>recipes/Python/577601_A_MSSQL_XML_importer_for_MySQL/recipe-577601.py
##
## XMLPush
##
## A small utility to move XML as exported from SQL Server or MS Access to a
## mySQL table.
##
## Not too fancy, but gets the job done. As with all recipes, season to taste
## depending on your needs.
##
## <NAME>
## 08 March 2011
## <EMAIL>
##
import re
from lxml import etree
import MySQLdb
def XMLPush(datafile,server,dbuser,password,dbname,table)
def quote(text):
return "'" + text + "'"
def doublequote(text):
return '"' + text + '"'
connection = MySQLdb.connectionect (host = server,
user = dbuser,
passwd = password,
db = dbname)
cursor = connection.cursor()
tree = etree.parse(datafile)
root = tree.getroot()
# Parse out data from XML
data = []
for child in root:
datarow = {}
for leaf in child:
datarow[leaf.tag] = leaf.text
data.append(datarow)
# Push data to DB
statements = []
for row in data:
columns = []
values = []
for item in row:
# Reformatting data to mySQL formats
columns.append(item.replace(" ",""))
temp = row[item]
values.append(quote(temp.replace("'","")))
# Push data to table
statement = "INSERT INTO " + table + " (" + ",".join(columns) + ") VALUES (" + \
",".join(values) + ")"
statements.append(statement)
for statement in statements:
cursor.execute(statement)
connection.commit()
connection.close()
XMLPush("MainTable.xml","mySQL-Server","mySQL-User","mySQL-Password","DB_Name","Table")
| <filename>recipes/Python/577601_A_MSSQL_XML_importer_for_MySQL/recipe-577601.py
##
## XMLPush
##
## A small utility to move XML as exported from SQL Server or MS Access to a
## mySQL table.
##
## Not too fancy, but gets the job done. As with all recipes, season to taste
## depending on your needs.
##
## <NAME>
## 08 March 2011
## <EMAIL>
##
import re
from lxml import etree
import MySQLdb
def XMLPush(datafile,server,dbuser,password,dbname,table)
def quote(text):
return "'" + text + "'"
def doublequote(text):
return '"' + text + '"'
connection = MySQLdb.connectionect (host = server,
user = dbuser,
passwd = password,
db = dbname)
cursor = connection.cursor()
tree = etree.parse(datafile)
root = tree.getroot()
# Parse out data from XML
data = []
for child in root:
datarow = {}
for leaf in child:
datarow[leaf.tag] = leaf.text
data.append(datarow)
# Push data to DB
statements = []
for row in data:
columns = []
values = []
for item in row:
# Reformatting data to mySQL formats
columns.append(item.replace(" ",""))
temp = row[item]
values.append(quote(temp.replace("'","")))
# Push data to table
statement = "INSERT INTO " + table + " (" + ",".join(columns) + ") VALUES (" + \
",".join(values) + ")"
statements.append(statement)
for statement in statements:
cursor.execute(statement)
connection.commit()
connection.close()
XMLPush("MainTable.xml","mySQL-Server","mySQL-User","mySQL-Password","DB_Name","Table")
| en | 0.764703 | ## ## XMLPush ## ## A small utility to move XML as exported from SQL Server or MS Access to a ## mySQL table. ## ## Not too fancy, but gets the job done. As with all recipes, season to taste ## depending on your needs. ## ## <NAME> ## 08 March 2011 ## <EMAIL> ## # Parse out data from XML # Push data to DB # Reformatting data to mySQL formats # Push data to table | 2.529108 | 3 |
examples/hello.py | altescy/logexp | 14 | 6620043 | <filename>examples/hello.py<gh_stars>10-100
import logexp
ex = logexp.Experiment("my_experiment")


@ex.worker("my_worker")
class MyWorker(logexp.BaseWorker):
    """Minimal example worker: writes a greeting file and reports one value."""

    def config(self):
        # Worker parameters are defined here.
        self.message = "hello world"

    def run(self):
        # Write the configured greeting into this worker's storage.
        with self.storage.open("hello.txt", "w") as out:
            out.write(self.message)

        result = logexp.Report()
        result["foo"] = "bar"
        return result
| <filename>examples/hello.py<gh_stars>10-100
import logexp
ex = logexp.Experiment("my_experiment")
@ex.worker("my_worker")
class MyWorker(logexp.BaseWorker):
def config(self):
self.message = "hello world"
def run(self):
with self.storage.open("hello.txt", "w") as f:
f.write(self.message)
report = logexp.Report()
report["foo"] = "bar"
return report
| none | 1 | 2.769697 | 3 | |
hostel/companies/views.py | phylocko/hostel | 0 | 6620044 | from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404, redirect, reverse
from hostel.settings import LOGIN_URL
from hostel.spy.models import Spy
from .models import Company, CompanySearch
from .forms import CompanyForm
@login_required(login_url=LOGIN_URL)
def company_list(request):
    """List companies alphabetically, optionally filtered by a search string."""
    context = {'app': 'companies', 'tab': request.GET.get('tab', 'all')}

    companies = Company.objects.all().order_by('name')

    if request.GET:
        search_string = request.GET.get('search')
        context['search_string'] = search_string
        if search_string:
            # Search results are rendered without pagination.
            context['listing'] = CompanySearch().search(search_string)
            return render(request, 'bs3/companies/company_list.html', context)

    # No search: paginate the full list with the user's page size preference.
    paginator = Paginator(companies, request.user.pagination_count)
    context['listing'] = paginator.get_page(request.GET.get('page', 1))
    return render(request, 'bs3/companies/company_list.html', context)
@login_required(login_url=LOGIN_URL)
@permission_required('companies.add_company')
def company_create(request):
    """Create a new company; on success, audit-log it and redirect to its page."""
    # NOTE(review): was 'app': 'cities' — an apparent copy-paste from the
    # cities app; every other view in this module uses 'companies'.
    context = {'app': 'companies', 'tab': 'add'}
    form = CompanyForm(request.POST or None)
    if form.is_valid():
        form.save()
        # Record the creation in the audit log before redirecting.
        Spy().created(form.instance, form, request)
        messages.add_message(request, messages.SUCCESS, "Компания успешно создана")
        return redirect(company_view, company_id=form.instance.pk)
    context['form'] = form
    return render(request, 'bs3/companies/company_create.html', context)
@login_required(login_url=LOGIN_URL)
def company_view(request, company_id):
    """Show one company with its recent audit log; handle the delete action."""
    company = get_object_or_404(Company, pk=company_id)
    context = {
        'app': 'companies',
        'mode': 'view',
        'company': company,
        # Last 100 audit entries for this company, newest first.
        'logs': Spy.objects.filter(object_name='company',
                                   object_id=company.pk).order_by('-time')[0:100],
    }

    if request.POST:
        # NOTE(review): deletion is gated only by login, not by a
        # 'companies.delete_company' permission — confirm this is intended.
        if request.POST.get('action') == 'delete_company':
            company.delete()
            messages.add_message(request, messages.SUCCESS, 'Компания удалена')
            return redirect(company_list)

    return render(request, 'bs3/companies/company_view.html', context)
@login_required(login_url=LOGIN_URL)
@permission_required('companies.change_company')
def company_update(request, company_id):
    """Edit an existing company; audit the change and redirect on success."""
    company = get_object_or_404(Company, pk=company_id)

    if request.method == "POST":
        form = CompanyForm(request.POST, instance=company)
        if form.is_valid():
            # Snapshot the pre-save state so the audit log can record the diff.
            old_object = Company.objects.get(pk=company.pk)
            Spy().changed(company, old_object, form, request)
            form.save()
            messages.add_message(request, messages.SUCCESS, 'Данные компании обновлены')
            return redirect(reverse(company_view, args=[company.pk]))
    else:
        form = CompanyForm(instance=company)

    # Invalid POST falls through here with the bound form and its errors.
    context = {'app': 'companies', 'company': company, 'form': form}
    return render(request, 'bs3/companies/company_update.html', context)
| from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404, redirect, reverse
from hostel.settings import LOGIN_URL
from hostel.spy.models import Spy
from .models import Company, CompanySearch
from .forms import CompanyForm
@login_required(login_url=LOGIN_URL)
def company_list(request):
context = {'app': 'companies'}
tab = request.GET.get('tab', 'all')
context['tab'] = tab
companies = Company.objects.all().order_by('name')
if request.GET:
search_string = request.GET.get('search')
context['search_string'] = search_string
if search_string:
companies = CompanySearch().search(search_string)
context['listing'] = companies
return render(request, 'bs3/companies/company_list.html', context)
paginator = Paginator(companies, request.user.pagination_count)
page = request.GET.get('page', 1)
companies = paginator.get_page(page)
context['listing'] = companies
return render(request, 'bs3/companies/company_list.html', context)
@login_required(login_url=LOGIN_URL)
@permission_required('companies.add_company')
def company_create(request):
context = {'app': 'cities', 'tab': 'add'}
form = CompanyForm(request.POST or None)
if form.is_valid():
form.save()
Spy().created(form.instance, form, request)
messages.add_message(request, messages.SUCCESS, "Компания успешно создана")
return redirect(company_view, company_id=form.instance.pk)
context['form'] = form
return render(request, 'bs3/companies/company_create.html', context)
@login_required(login_url=LOGIN_URL)
def company_view(request, company_id):
context = {'app': 'companies', 'mode': 'view'}
company = get_object_or_404(Company, pk=company_id)
context['company'] = company
logs = Spy.objects.filter(object_name='company', object_id=company.pk).order_by('-time')[0:100]
context['logs'] = logs
if request.POST:
action = request.POST.get('action')
if action == 'delete_company':
company.delete()
messages.add_message(request, messages.SUCCESS, 'Компания удалена')
return redirect(company_list)
return render(request, 'bs3/companies/company_view.html', context)
@login_required(login_url=LOGIN_URL)
@permission_required('companies.change_company')
def company_update(request, company_id):
context = {'app': 'companies'}
company = get_object_or_404(Company, pk=company_id)
context['company'] = company
form = CompanyForm(instance=company)
context['form'] = form
if request.method == "POST":
form = CompanyForm(request.POST, instance=company)
context['form'] = form
if form.is_valid():
old_object = Company.objects.get(pk=company.pk)
Spy().changed(company, old_object, form, request)
form.save()
messages.add_message(request, messages.SUCCESS, 'Данные компании обновлены')
url = reverse(company_view, args=[company.pk])
return redirect(url)
return render(request, 'bs3/companies/company_update.html', context)
| none | 1 | 1.93512 | 2 | |
tests/utils/test_abbr.py | kclowes/eth-abi | 127 | 6620045 | import pytest
from eth_abi.utils.string import (
abbr,
)
@pytest.mark.parametrize(
    'value,expected,limit',
    (
        (1234567891234567891, '1234567891234567891', None),
        (12345678912345678912, '12345678912345678912', None),
        (123456789123456789123, '12345678912345678...', None),
        ('asdf' * 30, "'asdfasdfasdfasdf...", None),
        (list(range(100)), '[0, 1, 2, 3, 4, 5...', None),
        (1234567891234567891, '...', 3),
        (1234567891234567891, '1...', 4),
    )
)
def test_abbr(value, expected, limit):
    """abbr() truncates a value's repr to the limit (default when limit=None)."""
    actual = abbr(value) if limit is None else abbr(value, limit)
    assert actual == expected
def test_abbr_throws_value_errors():
    # A limit of 2 leaves no room even for the '...' suffix (limit=3 is the
    # smallest that works, per the parametrized cases above), so abbr rejects it.
    with pytest.raises(ValueError):
        abbr('asdf', limit=2)
| import pytest
from eth_abi.utils.string import (
abbr,
)
@pytest.mark.parametrize(
'value,expected,limit',
(
(1234567891234567891, '1234567891234567891', None),
(12345678912345678912, '12345678912345678912', None),
(123456789123456789123, '12345678912345678...', None),
('asdf' * 30, "'asdfasdfasdfasdf...", None),
(list(range(100)), '[0, 1, 2, 3, 4, 5...', None),
(1234567891234567891, '...', 3),
(1234567891234567891, '1...', 4),
)
)
def test_abbr(value, expected, limit):
if limit is not None:
actual = abbr(value, limit)
else:
actual = abbr(value)
assert actual == expected
def test_abbr_throws_value_errors():
with pytest.raises(ValueError):
abbr('asdf', limit=2)
| none | 1 | 2.108914 | 2 | |
plot_alllines.py | joshfuchs/ZZCeti_analysis | 0 | 6620046 | <reponame>joshfuchs/ZZCeti_analysis
'''
Written May 2016 by JTF

Plots each normalized spectrum together with its normalized model: the Balmer
lines are cut out, centred on their rest wavelengths, vertically offset so
they stack, and every spectrum becomes one page of a single output PDF.
'''
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import os
from glob import glob

# (name, low, high, rest_wavelength, vertical_offset) for each Balmer line.
# Windows are in Angstroms; successive lines are shifted up by 0.3 in
# relative flux so they stack on one panel.
LINE_REGIONS = [
    ('alpha',   6413., 6713., 6564.6, -0.3),  # only plotted for red-enough spectra
    ('beta',    4721., 5001., 4862.6,  0.0),
    ('gamma',   4220., 4460., 4341.6,  0.3),
    ('delta',   4031., 4191., 4102.9,  0.6),
    ('epsilon', 3925., 4021., 3971.1,  0.9),
    ('H8',      3859., 3925., 3890.1,  1.2),
    ('H9',      3815., 3855., 3836.4,  1.5),
    ('H10',     3785., 3815., 3798.9,  1.8),
]


def _region(lamb, inten, low, high):
    """Return the (wavelength, intensity) points with low < wavelength < high+1."""
    mask = np.where((lamb > low) & (lamb < high + 1.))
    return lamb[mask], inten[mask]


# Keep only subdirectories whose name ends in 'A' (observation directories).
directories = [x[0] for x in os.walk('./') if x[0][-1] == 'A']

plots_pdf = PdfPages('all_plotfits.pdf')
for xdir in directories:
    os.chdir(xdir)
    wds = sorted(glob('norm_*txt'))
    models = sorted(glob('model_*txt'))
    for y in wds:
        wdlamb, wdinten, wdsigma = np.genfromtxt(y, unpack=True, skip_header=1)
        modlamb, modinten = np.genfromtxt(models[wds.index(y)], unpack=True)
        title = str(xdir) + ': ' + str(y[3:y.find('930')])

        plt.clf()
        for name, low, high, center, offset in LINE_REGIONS:
            # H-alpha only exists if the spectrum extends into the red.
            if name == 'alpha' and wdlamb.max() <= 6000.:
                continue
            wl, wi = _region(wdlamb, wdinten, low, high)
            ml, mi = _region(modlamb, modinten, low, high)
            plt.plot(wl - center, wi + offset, 'k')   # observed spectrum
            plt.plot(ml - center, mi + offset, 'r')   # model
        plt.xlabel('Relative Wavelength (Ang.)')
        plt.ylabel('Relative Flux')
        plt.title(title)
        plt.xlim(-150, 150)
        plt.ylim(0, 3.5)
        plots_pdf.savefig()
    os.chdir('../')
plots_pdf.close()
| '''
Written May 2016 by JTF
Plots normalized spectrum with normalized model
'''
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import os
from glob import glob
#Get list of directories in current directory. Only keep if it is a date by looking for ./2
##directories = [x[0] for x in os.walk('./') if x[0][0:3]=='./2']
directories = [x[0] for x in os.walk('./') if x[0][-1]=='A']
plots_pdf = PdfPages('all_plotfits.pdf')
for xdir in directories:
os.chdir(xdir)
##wds = sorted(glob('norm_*master*txt'))
wds = sorted(glob('norm_*txt'))
##models = sorted(glob('model_*master*txt'))
models = sorted(glob('model_*txt'))
#print xdir
#print wds
#print models
#print '\n'
for y in wds:
wdlamb,wdinten, wdsigma = np.genfromtxt(y,unpack=True,skip_header=1)
modlamb,modinten = np.genfromtxt(models[wds.index(y)],unpack=True)
#print y, models[wds.index(y)]
title = str(xdir) + ': ' + str(y[3:y.find('930')])
#Break up spectrum into individual lines for plotting
alphalow = 6413.
alphahigh = 6713.
betalow = 4721.
betahigh = 5001.
gammalow = 4220.
gammahigh = 4460.
deltalow = 4031. #4031
deltahigh = 4191. #4191
epsilonlow = 3925. #3925
epsilonhigh = 4021. # 4021
heightlow = 3859. #3859
heighthigh = 3925. # 3925
hninelow = 3815. #3815
hninehigh = 3855. #3855
htenlow = 3785. #3785
htenhigh = 3815.
if wdlamb.max() > 6000.:
wdlambalpha, wdintenalpha = wdlamb[np.where((wdlamb > alphalow) & (wdlamb < alphahigh+1))], wdinten[np.where((wdlamb > alphalow) & (wdlamb < alphahigh+1.))]
modlambalpha, modintenalpha = modlamb[np.where((modlamb > alphalow) & (modlamb < alphahigh+1))], modinten[np.where((modlamb > alphalow) & (modlamb < alphahigh+1.))]
wdlambbeta, wdintenbeta = wdlamb[np.where((wdlamb > betalow) & (wdlamb < betahigh+1))], wdinten[np.where((wdlamb > betalow) & (wdlamb < betahigh+1.))]
modlambbeta, modintenbeta = modlamb[np.where((modlamb > betalow) & (modlamb < betahigh+1))], modinten[np.where((modlamb > betalow) & (modlamb < betahigh+1.))]
wdlambgamma, wdintengamma = wdlamb[np.where((wdlamb > gammalow) & (wdlamb < gammahigh+1))], wdinten[np.where((wdlamb > gammalow) & (wdlamb < gammahigh+1.))]##wdlamb[349:460], wdinten[349:460]
modlambgamma, modintengamma = modlamb[np.where((modlamb > gammalow) & (modlamb < gammahigh+1))], modinten[np.where((modlamb > gammalow) & (modlamb < gammahigh+1.))]##modlamb[349:459], modinten[349:459]
wdlambdelta, wdintendelta = wdlamb[np.where((wdlamb > deltalow) & (wdlamb < deltahigh+1))], wdinten[np.where((wdlamb > deltalow) & (wdlamb < deltahigh+1.))]##wdlamb[230:349], wdinten[230:349]
modlambdelta, modintendelta = modlamb[np.where((modlamb > deltalow) & (modlamb < deltahigh+1))], modinten[np.where((modlamb > deltalow) & (modlamb < deltahigh+1.))]##odlamb[230:348], modinten[230:348]
wdlambepsilon, wdintenepsilon = wdlamb[np.where((wdlamb > epsilonlow) & (wdlamb < epsilonhigh+1))], wdinten[np.where((wdlamb > epsilonlow) & (wdlamb < epsilonhigh+1.))]##wdlamb[150:230], wdinten[150:230]
modlambepsilon, modintenepsilon = modlamb[np.where((modlamb > epsilonlow) & (modlamb < epsilonhigh+1))], modinten[np.where((modlamb > epsilonlow) & (modlamb < epsilonhigh+1.))]##modlamb[150:229], modinten[150:229]
wdlamb8, wdinten8 = wdlamb[np.where((wdlamb > heightlow) & (wdlamb < heighthigh+1))], wdinten[np.where((wdlamb > heightlow) & (wdlamb < heighthigh+1.))]##wdlamb[85:149], wdinten[85:149]
modlamb8, modinten8 = modlamb[np.where((modlamb > heightlow) & (modlamb < heighthigh+1))], modinten[np.where((modlamb > heightlow) & (modlamb < heighthigh+1.))]##modlamb[85:148], modinten[85:148]
wdlamb9, wdinten9 = wdlamb[np.where((wdlamb > hninelow) & (wdlamb < hninehigh+1))], wdinten[np.where((wdlamb > hninelow) & (wdlamb < hninehigh+1.))]##wdlamb[37:85], wdinten[37:85]
modlamb9, modinten9 = modlamb[np.where((modlamb > hninelow) & (modlamb < hninehigh+1))], modinten[np.where((modlamb > hninelow) & (modlamb < hninehigh+1.))]##modlamb[37:84], modinten[37:84]
wdlamb10, wdinten10 = wdlamb[np.where((wdlamb > htenlow) & (wdlamb < htenhigh+1))], wdinten[np.where((wdlamb > htenlow) & (wdlamb < htenhigh+1.))]##wdlamb[:37], wdinten[:37]
modlamb10, modinten10 = modlamb[np.where((modlamb > htenlow) & (modlamb < htenhigh+1))], modinten[np.where((modlamb > htenlow) & (modlamb < htenhigh+1.))]##modlamb[:37], modinten[:37]
plt.clf()
if wdlamb.max() > 6000.:
plt.plot(wdlambalpha-6564.6,wdintenalpha-0.3,'k')
plt.plot(modlambalpha-6564.6,modintenalpha-0.3,'r')
plt.plot(wdlambbeta-4862.6,wdintenbeta,'k')
plt.plot(modlambbeta-4862.6, modintenbeta,'r')
plt.plot(wdlambgamma-4341.6, wdintengamma+0.3,'k')
plt.plot(modlambgamma-4341.6, modintengamma+0.3,'r')
plt.plot(wdlambdelta-4102.9, wdintendelta+0.6,'k')
plt.plot(modlambdelta-4102.9, modintendelta+0.6,'r')
plt.plot(wdlambepsilon-3971.1, wdintenepsilon+0.9,'k')
plt.plot(modlambepsilon-3971.1, modintenepsilon+0.9,'r')
plt.plot(wdlamb8-3890.1,wdinten8+1.2,'k')
plt.plot(modlamb8-3890.1, modinten8+1.2,'r')
plt.plot(wdlamb9-3836.4,wdinten9+1.5,'k')
plt.plot(modlamb9-3836.4, modinten9+1.5,'r')
plt.plot(wdlamb10-3798.9,wdinten10+1.8,'k')
plt.plot(modlamb10-3798.9, modinten10+1.8,'r')
plt.xlabel('Relative Wavelength (Ang.)')
plt.ylabel('Relative Flux')
plt.title(title)
plt.xlim(-150,150)
plt.ylim(0,3.5)
#plt.savefig('VPHAS1813-2138_12000_625.png',format='png')#,dpi=12000)
plots_pdf.savefig()
#plt.show()
os.chdir('../')
plots_pdf.close() | en | 0.44211 | Written May 2016 by JTF Plots normalized spectrum with normalized model #Get list of directories in current directory. Only keep if it is a date by looking for ./2 ##directories = [x[0] for x in os.walk('./') if x[0][0:3]=='./2'] ##wds = sorted(glob('norm_*master*txt')) ##models = sorted(glob('model_*master*txt')) #print xdir #print wds #print models #print '\n' #print y, models[wds.index(y)] #Break up spectrum into individual lines for plotting #4031 #4191 #3925 # 4021 #3859 # 3925 #3815 #3855 #3785 ##wdlamb[349:460], wdinten[349:460] ##modlamb[349:459], modinten[349:459] ##wdlamb[230:349], wdinten[230:349] ##odlamb[230:348], modinten[230:348] ##wdlamb[150:230], wdinten[150:230] ##modlamb[150:229], modinten[150:229] ##wdlamb[85:149], wdinten[85:149] ##modlamb[85:148], modinten[85:148] ##wdlamb[37:85], wdinten[37:85] ##modlamb[37:84], modinten[37:84] ##wdlamb[:37], wdinten[:37] ##modlamb[:37], modinten[:37] #plt.savefig('VPHAS1813-2138_12000_625.png',format='png')#,dpi=12000) #plt.show() | 2.243165 | 2 |
ask/qa/migrations/0001_initial.py | a-shar/web_tech | 0 | 6620047 | <reponame>a-shar/web_tech
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration for the qa app: creates Question and Answer.

    Auto-generated by Django's makemigrations; applied migrations must stay
    stable, so do not edit the operations by hand.
    """

    # The user model is swappable, so depend on whatever AUTH_USER_MODEL
    # resolves to rather than on a concrete app's migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Answer: free-text reply with an author; its Question FK is added below.
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.TextField()),
                ('added_at', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Question: title/text with a rating, an author, and many-to-many likes.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=1024)),
                ('text', models.TextField()),
                ('added_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('rating', models.IntegerField()),
                ('author', models.ForeignKey(related_name='author', to=settings.AUTH_USER_MODEL)),
                ('likes', models.ManyToManyField(related_name='likes', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added separately because the Question table does not exist yet at
        # the point where Answer is created above.
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(to='qa.Question'),
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField()),
('added_at', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=1024)),
('text', models.TextField()),
('added_at', models.DateTimeField(default=django.utils.timezone.now)),
('rating', models.IntegerField()),
('author', models.ForeignKey(related_name='author', to=settings.AUTH_USER_MODEL)),
('likes', models.ManyToManyField(related_name='likes', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(to='qa.Question'),
),
] | en | 0.769321 | # -*- coding: utf-8 -*- | 1.845397 | 2 |
Exercicios/ex087.py | HenriqueSOliver/Python_CursoEmVideo | 1 | 6620048 | <filename>Exercicios/ex087.py
'''Read a 3x3 matrix of integers and report:
A) the sum of all even values entered;
B) the sum of the third column;
C) the largest value in the second row.

(Exercise 087 — prompts and output remain in Portuguese.)
'''
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
somap = somal = 0
for l in range(0, 3):
    for c in range(0, 3):
        matriz[l][c] = int(input(f'Digite um número [{l, c}]:'))
print('-=' * 30)
# Print the matrix and accumulate the sum of even values as we go.
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]:^5}]', end='')
        if matriz[l][c] % 2 == 0:
            somap += matriz[l][c]
    print()
print('-=' * 30)
print(f'A soma dos valores pares {somap}')
print('-=' * 30)
somal = sum(matriz[l][2] for l in range(0, 3))
print(f'A soma dos valores apresentados na terceira coluna é: {somal}')
print('-=' * 30)
# max() of the second row replaces the manual first-element/compare loop.
somac = max(matriz[1])
print(f'O maior valor da segunda linha é {somac}')  # fixed typo: 'linda' -> 'linha'
print('-=' * 30)
| <filename>Exercicios/ex087.py
''' Aprimore o desafio anterior, mostrando no final:
A) A soma de todos os valores pares digitados.
B) A soma dos valores da terceira coluna.
C) O maior valor da segunda linha.'''
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
somap = somal = somac= 0
for l in range (0, 3):
for c in range(0, 3):
matriz [l][c] = int(input(f'Digite um número [{l, c}]:'))
print('-='*30)
for l in range (0, 3):
for c in range(0, 3):
print(f'[{matriz[l][c]:^5}]',end='')
if matriz [l][c] % 2 == 0:
somap += matriz [l][c]
print()
print('-='*30)
print(f'A soma dos valores pares {somap}')
print('-='*30)
for l in range (0, 3):
somal += matriz [l][2]
print(f'A soma dos valores apresentados na terceira coluna é: {somal}')
print('-='*30)
for c in range(0, 3):
if c == 0:
somac = matriz [1][c]
elif matriz [1][c] > somac:
somac = matriz [1][c]
print(f'O maior valor da segunda linda é {somac}')
print('-='*30)
| pt | 0.66353 | Aprimore o desafio anterior, mostrando no final: A) A soma de todos os valores pares digitados. B) A soma dos valores da terceira coluna. C) O maior valor da segunda linha. | 3.913546 | 4 |
asyncio/plugin_cpu.py | nicolargo/glancesarena | 0 | 6620049 | #!/usr/bin/env python3
from plugin import GlancesPlugin
class Cpu(GlancesPlugin):
    """CPU plugin.

    Grabs overall CPU usage, per-state time percentages and kernel counters
    through PsUtil.

    Stat example:

    {'cpu_percent': 0.0, 'user': 0.0, 'nice': 0.0, 'system': 0.0, 'idle': 0.0,
     'iowait': 0.0, 'irq': 0.0, 'softirq': 0.0, 'steal': 0.0, 'guest': 0.0,
     'guest_nice': 0.0, 'ctx_switches': 3271803998, 'interrupts': 1205799541,
     'soft_interrupts': 787542175, 'syscalls': 0, 'ctx_switches_rate': None,
     'interrupts_rate': None, 'soft_interrupts_rate': None, 'syscalls_rate': None}
    """

    def __init__(self):
        super(Cpu, self).__init__()

        # PsUtil functions used to grab the stats.
        self.args['psutil_fct'] = [
            {'name': 'cpu_percent', 'args': {'interval': 0.0}},
            {'name': 'cpu_times_percent', 'args': {'interval': 0.0}},
            {'name': 'cpu_stats'},
        ]

        # For each gauge field, an extra field with its per-second rate
        # is created during the stats transform step.
        gauge_fields = ['ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls']
        self.args['transform'].update({'gauge': gauge_fields})

        # View layout: three columns of label/value lines.
        usage_column = {'lines': [['CPU', '{cpu_percent}%'],
                                  ['user', '{user}'],
                                  ['system', '{system}'],
                                  ['iowait', '{iowait}']]}
        states_column = {'lines': [['idle', '{idle}'],
                                   ['irq', '{irq}'],
                                   ['nice', '{nice}'],
                                   ['steal', '{steal}']]}
        counters_column = {'lines': [['ctx_sw', '{ctx_switches}'],
                                     ['inter', '{interrupts}'],
                                     ['sw_int', '{soft_interrupts}']]}
        self.args['view_layout'] = {
            'columns': [usage_column, states_column, counters_column]
        }


cpu = Cpu()
| #!/usr/bin/env python3
from plugin import GlancesPlugin
class Cpu(GlancesPlugin):
"""CPU plugin
Stat example:
{'cpu_percent': 0.0, 'user': 0.0, 'nice': 0.0, 'system': 0.0, 'idle': 0.0,
'iowait': 0.0, 'irq': 0.0, 'softirq': 0.0, 'steal': 0.0, 'guest': 0.0,
'guest_nice': 0.0, 'ctx_switches': 3271803998, 'interrupts': 1205799541,
'soft_interrupts': 787542175, 'syscalls': 0, 'ctx_switches_rate': None,
'interrupts_rate': None, 'soft_interrupts_rate': None, 'syscalls_rate': None}
"""
def __init__(self):
super(Cpu, self).__init__()
# Set the PsUtil functions used to grab the stats
self.args['psutil_fct'] = [{'name': 'cpu_percent', 'args': {'interval': 0.0}},
{'name': 'cpu_times_percent', 'args': {'interval': 0.0}},
{'name': 'cpu_stats'}]
# Transform the stats
# Gauge: for each gauge field, create an extra field with the rate per second
self.args['transform'].update({'gauge': ['ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls']})
# Init the view layout
self.args['view_layout'] = {
'columns': [
# First column
{
'lines': [['CPU', '{cpu_percent}%'],
['user', '{user}'],
['system', '{system}'],
['iowait', '{iowait}']]
},
# Second column
{
'lines': [['idle', '{idle}'],
['irq', '{irq}'],
['nice', '{nice}'],
['steal', '{steal}']]
},
# Third column
{
'lines': [['ctx_sw', '{ctx_switches}'],
['inter', '{interrupts}'],
['sw_int', '{soft_interrupts}']]
},
]
}
cpu = Cpu()
| en | 0.223615 | #!/usr/bin/env python3 CPU plugin Stat example: {'cpu_percent': 0.0, 'user': 0.0, 'nice': 0.0, 'system': 0.0, 'idle': 0.0, 'iowait': 0.0, 'irq': 0.0, 'softirq': 0.0, 'steal': 0.0, 'guest': 0.0, 'guest_nice': 0.0, 'ctx_switches': 3271803998, 'interrupts': 1205799541, 'soft_interrupts': 787542175, 'syscalls': 0, 'ctx_switches_rate': None, 'interrupts_rate': None, 'soft_interrupts_rate': None, 'syscalls_rate': None} # Set the PsUtil functions used to grab the stats # Transform the stats # Gauge: for each gauge field, create an extra field with the rate per second # Init the view layout # First column # Second column # Third column | 2.47472 | 2 |
panobbgo_lib/__init__.py | haraldschilly/panobbgo | 1 | 6620050 | <filename>panobbgo_lib/__init__.py
"""
This is the `library` part of Panobbgo, used by the main part of the project.
For example, the basic :class:`~panobbgo_lib.lib.Problem` class is defined here.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from .lib import Point, Problem, Result
__version__ = '0.1'
| <filename>panobbgo_lib/__init__.py
"""
This is the `library` of Panobbgo, used by the main part and the library.
For example, the basic :class:`~panobbgo_lib.lib.Problem` class is defined here.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from .lib import Point, Problem, Result
__version__ = '0.1'
| en | 0.755669 | This is the `library` of Panobbgo, used by the main part and the library. For example, the basic :class:`~panobbgo_lib.lib.Problem` class is defined here. .. moduleauthor:: <NAME> <<EMAIL>> | 1.750053 | 2 |