max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
corrosion_fact.py
rxm562/SeismoPi
0
6617551
# ALA (2001) Correction Modified Calculator import pandas as pd # calculator def Correction_Modifiers(pipe_info, Mat_type=None, Soil_type=None, Age=None): pipe_info = pd.DataFrame(data = pipe_info.values, columns =pipe_info.columns, index = pipe_info.index.astype('str')) # Pipe characteristics, strings if Mat_type is None: k1_wei = {'AC': 1.0, 'CI': 1.0, 'DI': 0.5, 'PVC': 0.5, 'STL': 0.7, 'RCCP': 0.2} def corrosion_factor(arrlike): Age = arrlike['Age'] Soil_type = arrlike['Soil_type'] Mat_type = arrlike['Mat_type'] if Mat_type == 'CI': if Soil_type == 'H': if Age < 1920: corrosion_rate = 3.0 elif Age < 1960: corrosion_rate = -0.05*Age+99 else: corrosion_rate = 1.0 elif Soil_type == 'M': if Age < 1920: corrosion_rate = 2.0 elif Age < 1960: corrosion_rate = -0.025*Age+50 else: corrosion_rate = 1.0 elif Soil_type == 'L': corrosion_rate = 1.0 else: print('Soil Type Index Wrong') elif Mat_type == 'DI': corrosion_rate = 1.5 else: corrosion_rate = 1.0 return corrosion_rate k1 = pipe_info['Mat_type'].map(k1_wei) # Modifier for material type k2 = pipe_info.apply(corrosion_factor,axis = 1) # Modifier for corrosion C = k1*k2 return C
# ALA (2001) Correction Modified Calculator import pandas as pd # calculator def Correction_Modifiers(pipe_info, Mat_type=None, Soil_type=None, Age=None): pipe_info = pd.DataFrame(data = pipe_info.values, columns =pipe_info.columns, index = pipe_info.index.astype('str')) # Pipe characteristics, strings if Mat_type is None: k1_wei = {'AC': 1.0, 'CI': 1.0, 'DI': 0.5, 'PVC': 0.5, 'STL': 0.7, 'RCCP': 0.2} def corrosion_factor(arrlike): Age = arrlike['Age'] Soil_type = arrlike['Soil_type'] Mat_type = arrlike['Mat_type'] if Mat_type == 'CI': if Soil_type == 'H': if Age < 1920: corrosion_rate = 3.0 elif Age < 1960: corrosion_rate = -0.05*Age+99 else: corrosion_rate = 1.0 elif Soil_type == 'M': if Age < 1920: corrosion_rate = 2.0 elif Age < 1960: corrosion_rate = -0.025*Age+50 else: corrosion_rate = 1.0 elif Soil_type == 'L': corrosion_rate = 1.0 else: print('Soil Type Index Wrong') elif Mat_type == 'DI': corrosion_rate = 1.5 else: corrosion_rate = 1.0 return corrosion_rate k1 = pipe_info['Mat_type'].map(k1_wei) # Modifier for material type k2 = pipe_info.apply(corrosion_factor,axis = 1) # Modifier for corrosion C = k1*k2 return C
en
0.60425
# ALA (2001) Correction Modified Calculator # calculator # Pipe characteristics, strings # Modifier for material type # Modifier for corrosion
3.042219
3
tests/factories.py
narnikgamarnikus/django-url-shorter
0
6617552
import factory from url_shorter import models import factory.fuzzy as fuzzy import datetime class UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda n: 'user-{}'.format(n)) email = factory.Sequence(lambda n: '<EMAIL>'.<EMAIL>(n)) password = factory.PostGenerationMethodCall('set_password', 'password') class Meta: model = 'auth.User' django_get_or_create = ('username', ) class URLFactory(factory.django.DjangoModelFactory): user = factory.SubFactory(UserFactory) long_url = 'https://google.com/' created = fuzzy.FuzzyNaiveDateTime(datetime.datetime.now()) count = fuzzy.FuzzyInteger(0) class Meta: model = models.URL
import factory from url_shorter import models import factory.fuzzy as fuzzy import datetime class UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda n: 'user-{}'.format(n)) email = factory.Sequence(lambda n: '<EMAIL>'.<EMAIL>(n)) password = factory.PostGenerationMethodCall('set_password', 'password') class Meta: model = 'auth.User' django_get_or_create = ('username', ) class URLFactory(factory.django.DjangoModelFactory): user = factory.SubFactory(UserFactory) long_url = 'https://google.com/' created = fuzzy.FuzzyNaiveDateTime(datetime.datetime.now()) count = fuzzy.FuzzyInteger(0) class Meta: model = models.URL
none
1
2.498365
2
FeedCrawler.py
rix1337/RSScrawler
63
6617553
<gh_stars>10-100 # -*- coding: utf-8 -*- # FeedCrawler # Projekt von https://github.com/rix1337 import multiprocessing from feedcrawler import crawler if __name__ == '__main__': multiprocessing.freeze_support() crawler.main()
# -*- coding: utf-8 -*- # FeedCrawler # Projekt von https://github.com/rix1337 import multiprocessing from feedcrawler import crawler if __name__ == '__main__': multiprocessing.freeze_support() crawler.main()
en
0.306275
# -*- coding: utf-8 -*- # FeedCrawler # Projekt von https://github.com/rix1337
1.625952
2
xypath/matchers.py
mikeAdamss/xypath
16
6617554
from hamcrest.library.text.substringmatcher import SubstringMatcher from hamcrest.core.helpers.hasmethod import hasmethod class StringContainsInsensitive(SubstringMatcher): def __init__(self, substring): super(StringContainsInsensitive, self).__init__(substring) def _matches(self, item): if not hasmethod(item, 'lower'): return False low = item.lower() if not hasmethod(low, 'find'): return False return low.find(self.substring.lower()) >= 0 def relationship(self): return 'containing (case-insensitively)' def contains_insensitive_string(substring): return StringContainsInsensitive(substring)
from hamcrest.library.text.substringmatcher import SubstringMatcher from hamcrest.core.helpers.hasmethod import hasmethod class StringContainsInsensitive(SubstringMatcher): def __init__(self, substring): super(StringContainsInsensitive, self).__init__(substring) def _matches(self, item): if not hasmethod(item, 'lower'): return False low = item.lower() if not hasmethod(low, 'find'): return False return low.find(self.substring.lower()) >= 0 def relationship(self): return 'containing (case-insensitively)' def contains_insensitive_string(substring): return StringContainsInsensitive(substring)
none
1
3.142806
3
9365.py
SESCNCFUARTYOM/EGE
0
6617555
<reponame>SESCNCFUARTYOM/EGE s='8'*68 while ('222' in s) or ('888' in s): if ('222' in s): s=s.replace ('222','8',1) else: s=s.replace ('888','2',1) print(s)
s='8'*68 while ('222' in s) or ('888' in s): if ('222' in s): s=s.replace ('222','8',1) else: s=s.replace ('888','2',1) print(s)
none
1
4.008096
4
preprocess/read_caevo_out.py
prachir1501/NeuralDater
64
6617556
import argparse, os, sys, pdb, shlex, re, argparse from parse import parse from bs4 import BeautifulSoup from pprint import pprint from joblib import Parallel, delayed from pymongo import MongoClient from pymongo.errors import BulkWriteError """ Reads the output of CAEVO (xml files) and extracts teh depdendency tree edges and the tokenized document. """ def read_xml(src): text = open(src).read() soup = BeautifulSoup(text, 'html.parser') try: doc = { '_id': soup.find('file')['name'], 'sentences': [], 'links': [] } for entry in soup.find_all('entry'): sent = { 'tokens': [], 'deps': [], 'events': [], 'times': [] } for ele in entry.find('tokens').find_all('t'): e = {} if ele.get_text() == '" " """ ""': e['left'], e['text'], e['right'] = ' ', '"', '' elif ele.get_text() == '"" """ " "': e['left'], e['text'], e['right'] = '', '"', ' ' elif ele.get_text() == '"" """ ""': e['left'], e['text'], e['right'] = '', '"', '' elif ele.get_text() == '" " """ " "': e['left'], e['text'], e['right'] = ' ', '"', ' ' elif ele.get_text() == '" " """ ""': e['left'], e['text'], e['right'] = ' ', '"', '' elif ele.get_text() == '"" """ " "': e['left'], e['text'], e['right'] = '', '"', ' ' elif ele.get_text() == '"" """ " "': e['left'], e['text'], e['right'] = '', '"', ' ' elif ele.get_text() == '"" "\\" ""': e['left'], e['text'], e['right'] = '', '\\', '' elif ele.get_text().count('"') != 6: e['left'], e['text'], e['right'] = '', '"', '' else: e['left'], e['text'], e['right'] = shlex.split(ele.get_text()) sent['tokens'].append(e) def get_dep(text): rel, left = text.split('(') src, left = left.split(', ') dest = left.split(')')[0] src, src_id = src[:src.rfind('-')], src[src.rfind('-')+1:] dest, dest_id = dest[:dest.rfind('-')], dest[dest.rfind('-')+1:] return rel, src, int(src_id), dest, int(dest_id) for ele in entry.find('deps').get_text().split('\n'): if ele == '': continue e = {} if len(list(parse('{}({}-{}, {}-{})', ele))) == 5: e['rel'], e['src'], e['src_id'], e['dest'], e['dest_id'] = parse('{}({}-{}, {}-{})', ele) elif len(re.findall('\w+', ele)) == 5: e['rel'], e['src'], e['src_id'], e['dest'], e['dest_id'] = re.findall('\w+', ele) else: e['rel'], e['src'], e['src_id'], e['dest'], e['dest_id'] = get_dep(ele) sent['deps'].append(e) for ele in entry.find('events').find_all('event'): e = {} e['tok_id'] = ele['offset'] e['eid'] = ele['id'] e['text'] = ele['string'] e['tense'] = ele['tense'] e['class'] = ele['class'] e['polarity'] = ele['polarity'] sent['events'].append(e) for ele in entry.find('timexes').find_all('timex'): e = {} e['tok_id'] = ele['offset'] e['tid'] = ele['tid'] e['text'] = ele['text'] e['length'] = ele['length'] e['type'] = ele['type'] e['value'] = ele['value'] sent['times'].append(e) doc['sentences'].append(sent) for link in soup.find_all('tlink'): e = {} e['relType'] = link['relation'] e['src'], e['dest'] = link['event1'].replace('i',''), link['event2'].replace('i','') e['type'] = link['type'] doc['links'].append(e) return doc except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.src.split(exc_tb.tb_frame.f_code.co_filename)[1] print('\nException Type: {}, \nCause: {}, \nfname: {}, \nline_no: {}'.format(exc_type, e.args[0], fname, exc_tb.tb_lineno)) print('{}|{}'.format(ele)) return {} if __name__== "__main__": parser = argparse.ArgumentParser(description='Adjust input for CATENA') parser.add_argument('-src', dest='src', default='test_in.xml', help='Path of output of CAEVO') parser.add_argument('-dest', dest='dest', default='test_out.xml', help='Destination to dump the tokenized document and dependency graph') args = parser.parse_args() res = read_xml(args.src) open(args.dest, 'w').write(json.dumps(res))
import argparse, os, sys, pdb, shlex, re, argparse from parse import parse from bs4 import BeautifulSoup from pprint import pprint from joblib import Parallel, delayed from pymongo import MongoClient from pymongo.errors import BulkWriteError """ Reads the output of CAEVO (xml files) and extracts teh depdendency tree edges and the tokenized document. """ def read_xml(src): text = open(src).read() soup = BeautifulSoup(text, 'html.parser') try: doc = { '_id': soup.find('file')['name'], 'sentences': [], 'links': [] } for entry in soup.find_all('entry'): sent = { 'tokens': [], 'deps': [], 'events': [], 'times': [] } for ele in entry.find('tokens').find_all('t'): e = {} if ele.get_text() == '" " """ ""': e['left'], e['text'], e['right'] = ' ', '"', '' elif ele.get_text() == '"" """ " "': e['left'], e['text'], e['right'] = '', '"', ' ' elif ele.get_text() == '"" """ ""': e['left'], e['text'], e['right'] = '', '"', '' elif ele.get_text() == '" " """ " "': e['left'], e['text'], e['right'] = ' ', '"', ' ' elif ele.get_text() == '" " """ ""': e['left'], e['text'], e['right'] = ' ', '"', '' elif ele.get_text() == '"" """ " "': e['left'], e['text'], e['right'] = '', '"', ' ' elif ele.get_text() == '"" """ " "': e['left'], e['text'], e['right'] = '', '"', ' ' elif ele.get_text() == '"" "\\" ""': e['left'], e['text'], e['right'] = '', '\\', '' elif ele.get_text().count('"') != 6: e['left'], e['text'], e['right'] = '', '"', '' else: e['left'], e['text'], e['right'] = shlex.split(ele.get_text()) sent['tokens'].append(e) def get_dep(text): rel, left = text.split('(') src, left = left.split(', ') dest = left.split(')')[0] src, src_id = src[:src.rfind('-')], src[src.rfind('-')+1:] dest, dest_id = dest[:dest.rfind('-')], dest[dest.rfind('-')+1:] return rel, src, int(src_id), dest, int(dest_id) for ele in entry.find('deps').get_text().split('\n'): if ele == '': continue e = {} if len(list(parse('{}({}-{}, {}-{})', ele))) == 5: e['rel'], e['src'], e['src_id'], e['dest'], e['dest_id'] = parse('{}({}-{}, {}-{})', ele) elif len(re.findall('\w+', ele)) == 5: e['rel'], e['src'], e['src_id'], e['dest'], e['dest_id'] = re.findall('\w+', ele) else: e['rel'], e['src'], e['src_id'], e['dest'], e['dest_id'] = get_dep(ele) sent['deps'].append(e) for ele in entry.find('events').find_all('event'): e = {} e['tok_id'] = ele['offset'] e['eid'] = ele['id'] e['text'] = ele['string'] e['tense'] = ele['tense'] e['class'] = ele['class'] e['polarity'] = ele['polarity'] sent['events'].append(e) for ele in entry.find('timexes').find_all('timex'): e = {} e['tok_id'] = ele['offset'] e['tid'] = ele['tid'] e['text'] = ele['text'] e['length'] = ele['length'] e['type'] = ele['type'] e['value'] = ele['value'] sent['times'].append(e) doc['sentences'].append(sent) for link in soup.find_all('tlink'): e = {} e['relType'] = link['relation'] e['src'], e['dest'] = link['event1'].replace('i',''), link['event2'].replace('i','') e['type'] = link['type'] doc['links'].append(e) return doc except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.src.split(exc_tb.tb_frame.f_code.co_filename)[1] print('\nException Type: {}, \nCause: {}, \nfname: {}, \nline_no: {}'.format(exc_type, e.args[0], fname, exc_tb.tb_lineno)) print('{}|{}'.format(ele)) return {} if __name__== "__main__": parser = argparse.ArgumentParser(description='Adjust input for CATENA') parser.add_argument('-src', dest='src', default='test_in.xml', help='Path of output of CAEVO') parser.add_argument('-dest', dest='dest', default='test_out.xml', help='Destination to dump the tokenized document and dependency graph') args = parser.parse_args() res = read_xml(args.src) open(args.dest, 'w').write(json.dumps(res))
en
0.220833
Reads the output of CAEVO (xml files) and extracts teh depdendency tree edges and the tokenized document. ""': e['left'], e['text'], e['right'] = ' ', '"', '' elif ele.get_text() == '"" ""': e['left'], e['text'], e['right'] = '', '"', '' elif ele.get_text() == '" " ""': e['left'], e['text'], e['right'] = ' ', '"', '' elif ele.get_text() == '""
2.523061
3
INBa/2015/Primov_G_S/task_4_19.py
YukkaSarasti/pythonintask
0
6617557
#Задача 4. Вариант 19 #Напишите программу, которая выводит имя, под которым скрывается <NAME>. Дополнительно необходимо вывести область интересов указанной личности, место рождения, годы рождения и смерти (если человек умер), вычислить возраст на данный момент (или момент смерти). Для хранения всех необходимых данных требуется использовать переменные. После вывода информации программа должна дожидаться пока пользователь нажмет Enter для выхода. #<NAME>. #21.02.2016 print("<NAME> более известен, как американский актёр Дейл") m = 'Де-Калб, Иллинойс, США' g = 1947 v = 68 o = 'Кино' print("Место рождения: " + m) print("Год рождения: ", g) print("Возраст: ", v) print("Область интересов: " + o) input("\n\nНажмите Enter")
#Задача 4. Вариант 19 #Напишите программу, которая выводит имя, под которым скрывается <NAME>. Дополнительно необходимо вывести область интересов указанной личности, место рождения, годы рождения и смерти (если человек умер), вычислить возраст на данный момент (или момент смерти). Для хранения всех необходимых данных требуется использовать переменные. После вывода информации программа должна дожидаться пока пользователь нажмет Enter для выхода. #<NAME>. #21.02.2016 print("<NAME> более известен, как американский актёр Дейл") m = 'Де-Калб, Иллинойс, США' g = 1947 v = 68 o = 'Кино' print("Место рождения: " + m) print("Год рождения: ", g) print("Возраст: ", v) print("Область интересов: " + o) input("\n\nНажмите Enter")
ru
0.994807
#Задача 4. Вариант 19 #Напишите программу, которая выводит имя, под которым скрывается <NAME>. Дополнительно необходимо вывести область интересов указанной личности, место рождения, годы рождения и смерти (если человек умер), вычислить возраст на данный момент (или момент смерти). Для хранения всех необходимых данных требуется использовать переменные. После вывода информации программа должна дожидаться пока пользователь нажмет Enter для выхода. #<NAME>. #21.02.2016
3.437579
3
hideintopng/encrypter.py
HugoJH/HideIntoPNG
2
6617558
<reponame>HugoJH/HideIntoPNG from Crypto import Random from Crypto.Cipher import AES import hashlib class Encrypter: AES_ALIGNMENT = 16 ITERATIONS = 42 SALT_SIZE = 16 def encryptData(self, data, passphrase): salt = Random.get_random_bytes(self.SALT_SIZE) key = self._generateKey(bytearray(passphrase.encode('utf-8')) + bytearray(salt), self.ITERATIONS) cipher = AES.new(key, AES.MODE_ECB) paddedClearData = self._padData(data, self.AES_ALIGNMENT) encryptedData = cipher.encrypt(paddedClearData) return salt + encryptedData def decryptData(self, encryptedData, passphrase): salt = encryptedData[0:self.SALT_SIZE] unsaltedEncryptedData = encryptedData[self.SALT_SIZE:] key = self._generateKey(bytearray(passphrase.encode('utf-8')) + bytearray(salt), self.ITERATIONS) cipher = AES.new(key, AES.MODE_ECB) return self._unpadData(cipher.decrypt(unsaltedEncryptedData)) def _padData(self, data, alignment): padding_size = alignment - len(data) % alignment padding = bytes([padding_size]) * padding_size return data + padding def _unpadData(self, paddedData): paddingSize = paddedData[-1] return paddedData[:-paddingSize] def _generateKey(self, saltedPassPhrase, iterations): assert iterations > 0 for i in range(iterations): saltedPassPhrase = hashlib.sha256(saltedPassPhrase).digest() return saltedPassPhrase
from Crypto import Random from Crypto.Cipher import AES import hashlib class Encrypter: AES_ALIGNMENT = 16 ITERATIONS = 42 SALT_SIZE = 16 def encryptData(self, data, passphrase): salt = Random.get_random_bytes(self.SALT_SIZE) key = self._generateKey(bytearray(passphrase.encode('utf-8')) + bytearray(salt), self.ITERATIONS) cipher = AES.new(key, AES.MODE_ECB) paddedClearData = self._padData(data, self.AES_ALIGNMENT) encryptedData = cipher.encrypt(paddedClearData) return salt + encryptedData def decryptData(self, encryptedData, passphrase): salt = encryptedData[0:self.SALT_SIZE] unsaltedEncryptedData = encryptedData[self.SALT_SIZE:] key = self._generateKey(bytearray(passphrase.encode('utf-8')) + bytearray(salt), self.ITERATIONS) cipher = AES.new(key, AES.MODE_ECB) return self._unpadData(cipher.decrypt(unsaltedEncryptedData)) def _padData(self, data, alignment): padding_size = alignment - len(data) % alignment padding = bytes([padding_size]) * padding_size return data + padding def _unpadData(self, paddedData): paddingSize = paddedData[-1] return paddedData[:-paddingSize] def _generateKey(self, saltedPassPhrase, iterations): assert iterations > 0 for i in range(iterations): saltedPassPhrase = hashlib.sha256(saltedPassPhrase).digest() return saltedPassPhrase
none
1
3.2967
3
lib/data/accidents/scrappers.py
thomas-marquis/datascience-securite-routiere
0
6617559
from typing import Dict from lib.constant import Datasets urls = { Datasets.CARACS: { 2005: 'https://www.data.gouv.fr/fr/datasets/r/a47866f7-ece1-4de8-8d31-3a1b4f477e08', 2019: 'https://www.data.gouv.fr/fr/datasets/r/e22ba475-45a3-46ac-a0f7-9ca9ed1e283a', 2018: 'https://www.data.gouv.fr/fr/datasets/r/6eee0852-cbd7-447e-bd70-37c433029405', 2017: 'https://www.data.gouv.fr/fr/datasets/r/9a7d408b-dd72-4959-ae7d-c854ec505354', 2016: 'https://www.data.gouv.fr/fr/datasets/r/96aadc9f-0b55-4e9a-a70e-c627ed97e6f7', }, Datasets.LOCATIONS: { 2005: 'https://www.data.gouv.fr/fr/datasets/r/3a3488e0-86a1-4917-b082-f3bdc25f6922', 2019: 'https://www.data.gouv.fr/fr/datasets/r/2ad65965-36a1-4452-9c08-61a6c874e3e6', 2018: 'https://www.data.gouv.fr/fr/datasets/r/d9d65ca1-16a3-4ea3-b7c8-2412c92b69d9', 2017: 'https://www.data.gouv.fr/fr/datasets/r/9b76a7b6-3eef-4864-b2da-1834417e305c', 2016: 'https://www.data.gouv.fr/fr/datasets/r/08b77510-39c4-4761-bf02-19457264790f', }, Datasets.VEHICLES: { 2005: 'https://www.data.gouv.fr/fr/datasets/r/924b962b-4400-4468-9f7d-0bdba28f51e9', 2019: 'https://www.data.gouv.fr/fr/datasets/r/780cd335-5048-4bd6-a841-105b44eb2667', 2018: 'https://www.data.gouv.fr/fr/datasets/r/b4aaeede-1a80-4d76-8f97-543dad479167', 2017: 'https://www.data.gouv.fr/fr/datasets/r/d6103d0c-6db5-466f-b724-91cbea521533', 2016: 'https://www.data.gouv.fr/fr/datasets/r/be2191a6-a7cd-446f-a9fc-8d698688eb9e', }, Datasets.USERS: { 2005: 'https://www.data.gouv.fr/fr/datasets/r/cecdbd46-11f2-41fa-b0bd-e6e223de6b3c', 2019: 'https://www.data.gouv.fr/fr/datasets/r/36b1b7b3-84b4-4901-9163-59ae8a9e3028', 2018: 'https://www.data.gouv.fr/fr/datasets/r/72b251e1-d5e1-4c46-a1c2-c65f1b26549a', 2017: 'https://www.data.gouv.fr/fr/datasets/r/07bfe612-0ad9-48ef-92d3-f5466f8465fe', 2016: 'https://www.data.gouv.fr/fr/datasets/r/e4c6f4fe-7c68-4a1d-9bb6-b0f1f5d45526', }, } def get_urls_by_dataset() -> Dict[str, Dict[int, str]]: """Scrap data.gouv and return url dict. returned dict structure:: { 'dataset_name': { XXXX: 'url' } } **dataset_name** in `['carac', 'lieux', 'usag', 'veh']` **XXXX** year int """ return urls
from typing import Dict from lib.constant import Datasets urls = { Datasets.CARACS: { 2005: 'https://www.data.gouv.fr/fr/datasets/r/a47866f7-ece1-4de8-8d31-3a1b4f477e08', 2019: 'https://www.data.gouv.fr/fr/datasets/r/e22ba475-45a3-46ac-a0f7-9ca9ed1e283a', 2018: 'https://www.data.gouv.fr/fr/datasets/r/6eee0852-cbd7-447e-bd70-37c433029405', 2017: 'https://www.data.gouv.fr/fr/datasets/r/9a7d408b-dd72-4959-ae7d-c854ec505354', 2016: 'https://www.data.gouv.fr/fr/datasets/r/96aadc9f-0b55-4e9a-a70e-c627ed97e6f7', }, Datasets.LOCATIONS: { 2005: 'https://www.data.gouv.fr/fr/datasets/r/3a3488e0-86a1-4917-b082-f3bdc25f6922', 2019: 'https://www.data.gouv.fr/fr/datasets/r/2ad65965-36a1-4452-9c08-61a6c874e3e6', 2018: 'https://www.data.gouv.fr/fr/datasets/r/d9d65ca1-16a3-4ea3-b7c8-2412c92b69d9', 2017: 'https://www.data.gouv.fr/fr/datasets/r/9b76a7b6-3eef-4864-b2da-1834417e305c', 2016: 'https://www.data.gouv.fr/fr/datasets/r/08b77510-39c4-4761-bf02-19457264790f', }, Datasets.VEHICLES: { 2005: 'https://www.data.gouv.fr/fr/datasets/r/924b962b-4400-4468-9f7d-0bdba28f51e9', 2019: 'https://www.data.gouv.fr/fr/datasets/r/780cd335-5048-4bd6-a841-105b44eb2667', 2018: 'https://www.data.gouv.fr/fr/datasets/r/b4aaeede-1a80-4d76-8f97-543dad479167', 2017: 'https://www.data.gouv.fr/fr/datasets/r/d6103d0c-6db5-466f-b724-91cbea521533', 2016: 'https://www.data.gouv.fr/fr/datasets/r/be2191a6-a7cd-446f-a9fc-8d698688eb9e', }, Datasets.USERS: { 2005: 'https://www.data.gouv.fr/fr/datasets/r/cecdbd46-11f2-41fa-b0bd-e6e223de6b3c', 2019: 'https://www.data.gouv.fr/fr/datasets/r/36b1b7b3-84b4-4901-9163-59ae8a9e3028', 2018: 'https://www.data.gouv.fr/fr/datasets/r/72b251e1-d5e1-4c46-a1c2-c65f1b26549a', 2017: 'https://www.data.gouv.fr/fr/datasets/r/07bfe612-0ad9-48ef-92d3-f5466f8465fe', 2016: 'https://www.data.gouv.fr/fr/datasets/r/e4c6f4fe-7c68-4a1d-9bb6-b0f1f5d45526', }, } def get_urls_by_dataset() -> Dict[str, Dict[int, str]]: """Scrap data.gouv and return url dict. returned dict structure:: { 'dataset_name': { XXXX: 'url' } } **dataset_name** in `['carac', 'lieux', 'usag', 'veh']` **XXXX** year int """ return urls
en
0.180942
Scrap data.gouv and return url dict. returned dict structure:: { 'dataset_name': { XXXX: 'url' } } **dataset_name** in `['carac', 'lieux', 'usag', 'veh']` **XXXX** year int
1.856072
2
bugle_project/twitter_api/models.py
simonw/bugle_project
8
6617560
<filename>bugle_project/twitter_api/models.py<gh_stars>1-10 from django.contrib.auth.models import User from django.core.files.base import ContentFile from django.db import models import md5 from PIL import Image import StringIO class TwitterProfile(models.Model): user = models.OneToOneField(User, related_name='twitter_profile') profile_image = models.ImageField(upload_to='profile-images/') def save(self, *args, **kwargs): if not self.profile_image: im = Image.new('RGB', (48, 48), '#%s' % md5.new(str(self.user)).hexdigest()[:6]) output = StringIO.StringIO() im.save(output, format='PNG') self.profile_image.save( '%s.png' % self.user.id, ContentFile(output.getvalue()), save=False, ) super(TwitterProfile, self).save(*args, **kwargs)
<filename>bugle_project/twitter_api/models.py<gh_stars>1-10 from django.contrib.auth.models import User from django.core.files.base import ContentFile from django.db import models import md5 from PIL import Image import StringIO class TwitterProfile(models.Model): user = models.OneToOneField(User, related_name='twitter_profile') profile_image = models.ImageField(upload_to='profile-images/') def save(self, *args, **kwargs): if not self.profile_image: im = Image.new('RGB', (48, 48), '#%s' % md5.new(str(self.user)).hexdigest()[:6]) output = StringIO.StringIO() im.save(output, format='PNG') self.profile_image.save( '%s.png' % self.user.id, ContentFile(output.getvalue()), save=False, ) super(TwitterProfile, self).save(*args, **kwargs)
none
1
2.351259
2
venv/lib/python2.7/site-packages/opbeat/conf/__init__.py
CharleyFarley/ovvio
99
6617561
""" opbeat.conf ~~~~~~~~~~ :copyright: (c) 2011-2012 Opbeat Large portions are :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import logging __all__ = ('setup_logging', ) def setup_logging(handler, exclude=['opbeat', 'gunicorn', 'south', 'opbeat.errors']): """ Configures logging to pipe to Opbeat. - ``exclude`` is a list of loggers that shouldn't go to Opbeat. For a typical Python install: >>> from opbeat.handlers.logging import OpbeatHandler >>> client = Opbeat(...) >>> setup_logging(OpbeatHandler(client)) Within Django: >>> from opbeat.contrib.django.logging import OpbeatHandler >>> setup_logging(OpbeatHandler()) Returns a boolean based on if logging was configured or not. """ logger = logging.getLogger() if handler.__class__ in map(type, logger.handlers): return False logger.addHandler(handler) # Add StreamHandler to sentry's default so you can catch missed exceptions for logger_name in exclude: logger = logging.getLogger(logger_name) logger.propagate = False logger.addHandler(logging.StreamHandler()) return True
""" opbeat.conf ~~~~~~~~~~ :copyright: (c) 2011-2012 Opbeat Large portions are :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import logging __all__ = ('setup_logging', ) def setup_logging(handler, exclude=['opbeat', 'gunicorn', 'south', 'opbeat.errors']): """ Configures logging to pipe to Opbeat. - ``exclude`` is a list of loggers that shouldn't go to Opbeat. For a typical Python install: >>> from opbeat.handlers.logging import OpbeatHandler >>> client = Opbeat(...) >>> setup_logging(OpbeatHandler(client)) Within Django: >>> from opbeat.contrib.django.logging import OpbeatHandler >>> setup_logging(OpbeatHandler()) Returns a boolean based on if logging was configured or not. """ logger = logging.getLogger() if handler.__class__ in map(type, logger.handlers): return False logger.addHandler(handler) # Add StreamHandler to sentry's default so you can catch missed exceptions for logger_name in exclude: logger = logging.getLogger(logger_name) logger.propagate = False logger.addHandler(logging.StreamHandler()) return True
en
0.671742
opbeat.conf ~~~~~~~~~~ :copyright: (c) 2011-2012 Opbeat Large portions are :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. Configures logging to pipe to Opbeat. - ``exclude`` is a list of loggers that shouldn't go to Opbeat. For a typical Python install: >>> from opbeat.handlers.logging import OpbeatHandler >>> client = Opbeat(...) >>> setup_logging(OpbeatHandler(client)) Within Django: >>> from opbeat.contrib.django.logging import OpbeatHandler >>> setup_logging(OpbeatHandler()) Returns a boolean based on if logging was configured or not. # Add StreamHandler to sentry's default so you can catch missed exceptions
1.751394
2
ml-supervised-regression-with-scaling.py
rsjain1978/ml-supervised-regression
0
6617562
from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.preprocessing import StandardScaler import numpy as np import pandas as pd import matplotlib.pyplot as plt dataset=pd.read_csv('./marketing-unscaled.csv') df = pd.DataFrame(dataset, columns=['Marketing Spend','Revenue']) X = df[['Marketing Spend']] Y = df[['Revenue']] X=X.values.reshape(-1,1) #split the data as training and test data. train_X, test_X, train_Y, test_Y = train_test_split(X,Y, test_size=0.2) #initialize scaler object scaler = StandardScaler() #scale the training X data train_scaled_X = scaler.fit_transform(train_X) #scale the training Y data train_scaled_Y = scaler.transform(train_Y) regressor = LinearRegression() regressor.fit(train_scaled_X, train_scaled_Y) print (regressor.coef_) #check the predicted value from testing X data for testX in test_X: testX = np.asscalar(testX) print ('X='+format(testX)+'\t Y^=%2f'%regressor.predict(testX))
from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.preprocessing import StandardScaler import numpy as np import pandas as pd import matplotlib.pyplot as plt dataset=pd.read_csv('./marketing-unscaled.csv') df = pd.DataFrame(dataset, columns=['Marketing Spend','Revenue']) X = df[['Marketing Spend']] Y = df[['Revenue']] X=X.values.reshape(-1,1) #split the data as training and test data. train_X, test_X, train_Y, test_Y = train_test_split(X,Y, test_size=0.2) #initialize scaler object scaler = StandardScaler() #scale the training X data train_scaled_X = scaler.fit_transform(train_X) #scale the training Y data train_scaled_Y = scaler.transform(train_Y) regressor = LinearRegression() regressor.fit(train_scaled_X, train_scaled_Y) print (regressor.coef_) #check the predicted value from testing X data for testX in test_X: testX = np.asscalar(testX) print ('X='+format(testX)+'\t Y^=%2f'%regressor.predict(testX))
en
0.712668
#split the data as training and test data. #initialize scaler object #scale the training X data #scale the training Y data #check the predicted value from testing X data
3.66802
4
library/cluster_test.py
ianmiell/shutit-openshift-cluster
23
6617563
import logging import time def test_cluster(shutit, shutit_sessions, shutit_master1_session, test_config_module): #for machine in test_config_module.machines.keys(): # shutit_session = shutit_sessions[machine] # shutit_session.send('cd /etc/sysconfig') # shutit_session.send(r'''for f in $(ls origin*); do sed -i 's/OPTIONS=.*--loglevel=.*/OPTIONS="--loglevel=8"/' $f; done''') # shutit_session.send(r'''systemctl restart origin-master-api''', check_exit=False) # shutit_session.send(r'''systemctl restart origin-master-controllers''', check_exit=False) # shutit_session.send(r'''systemctl restart origin-master''', check_exit=False) # shutit_session.send(r'''systemctl restart origin-node''', check_exit=False) # shutit_session.send('cd -') #shutit_master1_session.send('sleep 600') shutit_session = shutit_master1_session # Create a mysql application if shutit_session.send_and_get_output('oc get projects | grep mysql | wc -l') == '0': shutit_session.send('oc adm new-project mysql') ok = False while not ok: count = 80 shutit.log('Iterations left: ' + str(count),level=logging.INFO) # Destroy all... shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete svc mysql -n mysql', check_exit=False) shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete dc mysql -n mysql', check_exit=False) shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete is mysql -n mysql', check_exit=False) shutit_session.send('''oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep mysql | awk '{print $1}' | xargs -n1 oc --config=/etc/origin/master/admin.kubeconfig delete pod -n mysql || true''') # --allow-missing-images has been seen to be needed very occasionally. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig new-app -e=MYSQL_ROOT_PASSWORD=root mysql --allow-missing-images -n mysql') #if not check_app('mysql','mysql', '80', '15'): # shutit_session.pause_point('mysql app did not start correctly') while True: if count == 0: break count -= 1 # Sometimes terminating containers don't go away quickly. status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep ^mysql- | grep -v deploy | awk '{print $3}' | grep -v Terminating | head -1""") if status == 'Running': ok = True break elif status == 'Error': break elif status == 'ImagePullBackOff': shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy mysql --cancel -n mysql || oc --config=/etc/origin/master/admin.kubeconfig rollout cancel dc/mysql -n mysql') shutit_session.send('sleep 15') shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy mysql --retry -n mysql || oc --config=/etc/origin/master/admin.kubeconfig deploy mysql --latest -n mysql || oc --config=/etc/origin/master/admin.kubeconfig rollout retry dc/mysql -n mysql || oc --config=/etc/origin/master/admin.kubeconfig rollout latest dc/mysql -n mysql') # Check on deployment deploy_status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep ^mysql- | grep -w deploy | awk '{print $3}' | grep -v Terminating | head -1""") # If deploy has errored... if deploy_status == 'Error': # Try and rollout latest, ensure it's been cancelled and roll out again. shutit_session.send('oc rollout cancel dc/mysql -n mysql', check_exit=False) shutit_session.send('oc rollout latest dc/mysql -n mysql') # For debug/info purposes. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep ^mysql',check_exit=False) shutit_session.send('sleep 15') podname = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep mysql | grep -v deploy | awk '{print $1}' | tail -1""") # exec and check hosts google.com and kubernetes.default.svc.cluster.local # ping has been removed! if shutit_session.send_and_get_output('oc get projects | grep net | wc -l') == '0': shutit_session.send('oc adm new-project net') ok = False while not ok: count = 80 shutit.log('Iterations left: ' + str(count),level=logging.INFO) # Destroy all... shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete svc net-tools -n net', check_exit=False) shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete dc net-tools -n net', check_exit=False) shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete is net-tools -n net', check_exit=False) shutit_session.send('''oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep net-tools | awk '{print $1}' | xargs -n1 oc --config=/etc/origin/master/admin.kubeconfig delete pod -n net || true''') # --allow-missing-images has been seen to be needed very occasionally. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig new-app imiell/net-tools --allow-missing-images -n net') while True: if count == 0: break count -= 1 # Sometimes terminating containers don't go away quickly. status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep ^net-tools | grep -v deploy | awk '{print $3}' | grep -v Terminating | head -1""") if status == 'Running': ok = True break elif status == 'Error': break elif status == 'ImagePullBackOff': shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy net-tools --cancel -n net || oc --config=/etc/origin/master/admin.kubeconfig rollout cancel dc/net-tools -n net') shutit_session.send('sleep 15') shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy net-tools --retry -n net || oc --config=/etc/origin/master/admin.kubeconfig deploy net-tools --latest -n net || oc --config=/etc/origin/master/admin.kubeconfig rollout retry dc/net-tools -n net || oc --config=/etc/origin/master/admin.kubeconfig rollout latest dc/net-tools -n net') # Check on deployment deploy_status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep ^net-tools | grep -w deploy | awk '{print $3}' | grep -v Terminating | head -1""") # If deploy has errored... if deploy_status == 'Error': # Try and rollout latest, ensure it's been cancelled and roll out again. shutit_session.send('oc rollout cancel dc/net-tools -n net', check_exit=False) shutit_session.send('oc rollout latest dc/net-tools -n net') # For debug/info purposes. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep ^net-tools',check_exit=False) shutit_session.send('sleep 15') time.sleep(30) # pause to allow resolve to work ok podname = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep net-tools | grep -v deploy | awk '{print $1}' | tail -1""") shutit_session.send("""oc --config=/etc/origin/master/admin.kubeconfig -n net exec -ti """ + podname + """ -- /bin/sh -c 'host google.com'""") for addr in ('kubernetes.default.svc','kubernetes.default.svc.cluster.local'): if shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig -n net-tools exec -ti """ + podname + """ -- /bin/sh -c 'host """ + addr + """ -s'""") != '172.30.0.1': shutit_session.pause_point('kubernetes.default.svc.cluster.local did not resolve correctly') def diagnostic_tests(shutit_session): #test_list also includes: AggregatedLogging MetricsApiProxy NetworkCheck test_list = ('AnalyzeLogs', 'ClusterRegistry', 'ClusterRoleBindings', 'ClusterRoles', 'ClusterRouter', 'ConfigContexts', 'DiagnosticPod', 'MasterConfigCheck', 'MasterNode', 'NodeConfigCheck', 'NodeDefinitions', 'ServiceExternalIPs', 'UnitStatus') for test in test_list: shutit_session.send('oc adm diagnostics ' + test) def check_app(namespace, appname, iters, sleep): count = iters while True: if count == 0: break count -= 1 # Sometimes terminating containers don't go away quickly. status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n """ + namespace + """ | grep ^""" + appname + """- | grep -v deploy | awk '{print $3}' | grep -v Terminating | head -1""") if status == 'Running': ok = True return True elif status == 'Error': break elif status == 'ImagePullBackOff': shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy ' + appname + ' --cancel -n ' + namespace + ' || oc --config=/etc/origin/master/admin.kubeconfig rollout cancel dc/' + appname + ' -n ' + namespace) shutit_session.send('sleep 15') shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy ' + appname + ' --retry -n ' + namespace + ' || oc --config=/etc/origin/master/admin.kubeconfig deploy ' + appname + ' --latest -n ' + namespace + ' || oc --config=/etc/origin/master/admin.kubeconfig rollout retry dc/' + appname + ' -n ' + namespace + ' || oc --config=/etc/origin/master/admin.kubeconfig rollout latest dc/' + appname + ' -n ' + namespace) # Check on deployment deploy_status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n """ + namespace + """ | grep ^""" + appname + """- | grep -w deploy | awk '{print $3}' | grep -v Terminating | head -1""") # If deploy has errored... if deploy_status == 'Error': # Try and rollout latest, ensure it's been cancelled and roll out again. shutit_session.send('oc rollout cancel dc/' + appname + ' -n ' + namespace, check_exit=False) shutit_session.send('oc rollout latest dc/' + appname + ' -n ' + namespace) # For debug/info purposes. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig get pods -n ' + namespace + ' | grep ^' + appname + '-',check_exit=False) shutit_session.send('sleep ' + sleep) return False
import logging import time def test_cluster(shutit, shutit_sessions, shutit_master1_session, test_config_module): #for machine in test_config_module.machines.keys(): # shutit_session = shutit_sessions[machine] # shutit_session.send('cd /etc/sysconfig') # shutit_session.send(r'''for f in $(ls origin*); do sed -i 's/OPTIONS=.*--loglevel=.*/OPTIONS="--loglevel=8"/' $f; done''') # shutit_session.send(r'''systemctl restart origin-master-api''', check_exit=False) # shutit_session.send(r'''systemctl restart origin-master-controllers''', check_exit=False) # shutit_session.send(r'''systemctl restart origin-master''', check_exit=False) # shutit_session.send(r'''systemctl restart origin-node''', check_exit=False) # shutit_session.send('cd -') #shutit_master1_session.send('sleep 600') shutit_session = shutit_master1_session # Create a mysql application if shutit_session.send_and_get_output('oc get projects | grep mysql | wc -l') == '0': shutit_session.send('oc adm new-project mysql') ok = False while not ok: count = 80 shutit.log('Iterations left: ' + str(count),level=logging.INFO) # Destroy all... shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete svc mysql -n mysql', check_exit=False) shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete dc mysql -n mysql', check_exit=False) shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete is mysql -n mysql', check_exit=False) shutit_session.send('''oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep mysql | awk '{print $1}' | xargs -n1 oc --config=/etc/origin/master/admin.kubeconfig delete pod -n mysql || true''') # --allow-missing-images has been seen to be needed very occasionally. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig new-app -e=MYSQL_ROOT_PASSWORD=root mysql --allow-missing-images -n mysql') #if not check_app('mysql','mysql', '80', '15'): # shutit_session.pause_point('mysql app did not start correctly') while True: if count == 0: break count -= 1 # Sometimes terminating containers don't go away quickly. status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep ^mysql- | grep -v deploy | awk '{print $3}' | grep -v Terminating | head -1""") if status == 'Running': ok = True break elif status == 'Error': break elif status == 'ImagePullBackOff': shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy mysql --cancel -n mysql || oc --config=/etc/origin/master/admin.kubeconfig rollout cancel dc/mysql -n mysql') shutit_session.send('sleep 15') shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy mysql --retry -n mysql || oc --config=/etc/origin/master/admin.kubeconfig deploy mysql --latest -n mysql || oc --config=/etc/origin/master/admin.kubeconfig rollout retry dc/mysql -n mysql || oc --config=/etc/origin/master/admin.kubeconfig rollout latest dc/mysql -n mysql') # Check on deployment deploy_status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep ^mysql- | grep -w deploy | awk '{print $3}' | grep -v Terminating | head -1""") # If deploy has errored... if deploy_status == 'Error': # Try and rollout latest, ensure it's been cancelled and roll out again. shutit_session.send('oc rollout cancel dc/mysql -n mysql', check_exit=False) shutit_session.send('oc rollout latest dc/mysql -n mysql') # For debug/info purposes. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep ^mysql',check_exit=False) shutit_session.send('sleep 15') podname = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep mysql | grep -v deploy | awk '{print $1}' | tail -1""") # exec and check hosts google.com and kubernetes.default.svc.cluster.local # ping has been removed! if shutit_session.send_and_get_output('oc get projects | grep net | wc -l') == '0': shutit_session.send('oc adm new-project net') ok = False while not ok: count = 80 shutit.log('Iterations left: ' + str(count),level=logging.INFO) # Destroy all... shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete svc net-tools -n net', check_exit=False) shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete dc net-tools -n net', check_exit=False) shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig delete is net-tools -n net', check_exit=False) shutit_session.send('''oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep net-tools | awk '{print $1}' | xargs -n1 oc --config=/etc/origin/master/admin.kubeconfig delete pod -n net || true''') # --allow-missing-images has been seen to be needed very occasionally. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig new-app imiell/net-tools --allow-missing-images -n net') while True: if count == 0: break count -= 1 # Sometimes terminating containers don't go away quickly. status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep ^net-tools | grep -v deploy | awk '{print $3}' | grep -v Terminating | head -1""") if status == 'Running': ok = True break elif status == 'Error': break elif status == 'ImagePullBackOff': shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy net-tools --cancel -n net || oc --config=/etc/origin/master/admin.kubeconfig rollout cancel dc/net-tools -n net') shutit_session.send('sleep 15') shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy net-tools --retry -n net || oc --config=/etc/origin/master/admin.kubeconfig deploy net-tools --latest -n net || oc --config=/etc/origin/master/admin.kubeconfig rollout retry dc/net-tools -n net || oc --config=/etc/origin/master/admin.kubeconfig rollout latest dc/net-tools -n net') # Check on deployment deploy_status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep ^net-tools | grep -w deploy | awk '{print $3}' | grep -v Terminating | head -1""") # If deploy has errored... if deploy_status == 'Error': # Try and rollout latest, ensure it's been cancelled and roll out again. shutit_session.send('oc rollout cancel dc/net-tools -n net', check_exit=False) shutit_session.send('oc rollout latest dc/net-tools -n net') # For debug/info purposes. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep ^net-tools',check_exit=False) shutit_session.send('sleep 15') time.sleep(30) # pause to allow resolve to work ok podname = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep net-tools | grep -v deploy | awk '{print $1}' | tail -1""") shutit_session.send("""oc --config=/etc/origin/master/admin.kubeconfig -n net exec -ti """ + podname + """ -- /bin/sh -c 'host google.com'""") for addr in ('kubernetes.default.svc','kubernetes.default.svc.cluster.local'): if shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig -n net-tools exec -ti """ + podname + """ -- /bin/sh -c 'host """ + addr + """ -s'""") != '172.30.0.1': shutit_session.pause_point('kubernetes.default.svc.cluster.local did not resolve correctly') def diagnostic_tests(shutit_session): #test_list also includes: AggregatedLogging MetricsApiProxy NetworkCheck test_list = ('AnalyzeLogs', 'ClusterRegistry', 'ClusterRoleBindings', 'ClusterRoles', 'ClusterRouter', 'ConfigContexts', 'DiagnosticPod', 'MasterConfigCheck', 'MasterNode', 'NodeConfigCheck', 'NodeDefinitions', 'ServiceExternalIPs', 'UnitStatus') for test in test_list: shutit_session.send('oc adm diagnostics ' + test) def check_app(namespace, appname, iters, sleep): count = iters while True: if count == 0: break count -= 1 # Sometimes terminating containers don't go away quickly. status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n """ + namespace + """ | grep ^""" + appname + """- | grep -v deploy | awk '{print $3}' | grep -v Terminating | head -1""") if status == 'Running': ok = True return True elif status == 'Error': break elif status == 'ImagePullBackOff': shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy ' + appname + ' --cancel -n ' + namespace + ' || oc --config=/etc/origin/master/admin.kubeconfig rollout cancel dc/' + appname + ' -n ' + namespace) shutit_session.send('sleep 15') shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig deploy ' + appname + ' --retry -n ' + namespace + ' || oc --config=/etc/origin/master/admin.kubeconfig deploy ' + appname + ' --latest -n ' + namespace + ' || oc --config=/etc/origin/master/admin.kubeconfig rollout retry dc/' + appname + ' -n ' + namespace + ' || oc --config=/etc/origin/master/admin.kubeconfig rollout latest dc/' + appname + ' -n ' + namespace) # Check on deployment deploy_status = shutit_session.send_and_get_output("""oc --config=/etc/origin/master/admin.kubeconfig get pods -n """ + namespace + """ | grep ^""" + appname + """- | grep -w deploy | awk '{print $3}' | grep -v Terminating | head -1""") # If deploy has errored... if deploy_status == 'Error': # Try and rollout latest, ensure it's been cancelled and roll out again. shutit_session.send('oc rollout cancel dc/' + appname + ' -n ' + namespace, check_exit=False) shutit_session.send('oc rollout latest dc/' + appname + ' -n ' + namespace) # For debug/info purposes. shutit_session.send('oc --config=/etc/origin/master/admin.kubeconfig get pods -n ' + namespace + ' | grep ^' + appname + '-',check_exit=False) shutit_session.send('sleep ' + sleep) return False
en
0.590131
#for machine in test_config_module.machines.keys(): # shutit_session = shutit_sessions[machine] # shutit_session.send('cd /etc/sysconfig') # shutit_session.send(r'''for f in $(ls origin*); do sed -i 's/OPTIONS=.*--loglevel=.*/OPTIONS="--loglevel=8"/' $f; done''') # shutit_session.send(r'''systemctl restart origin-master-api''', check_exit=False) # shutit_session.send(r'''systemctl restart origin-master-controllers''', check_exit=False) # shutit_session.send(r'''systemctl restart origin-master''', check_exit=False) # shutit_session.send(r'''systemctl restart origin-node''', check_exit=False) # shutit_session.send('cd -') #shutit_master1_session.send('sleep 600') # Create a mysql application # Destroy all... oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep mysql | awk '{print $1}' | xargs -n1 oc --config=/etc/origin/master/admin.kubeconfig delete pod -n mysql || true # --allow-missing-images has been seen to be needed very occasionally. #if not check_app('mysql','mysql', '80', '15'): # shutit_session.pause_point('mysql app did not start correctly') # Sometimes terminating containers don't go away quickly. oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep ^mysql- | grep -v deploy | awk '{print $3}' | grep -v Terminating | head -1 # Check on deployment oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep ^mysql- | grep -w deploy | awk '{print $3}' | grep -v Terminating | head -1 # If deploy has errored... # Try and rollout latest, ensure it's been cancelled and roll out again. # For debug/info purposes. oc --config=/etc/origin/master/admin.kubeconfig get pods -n mysql | grep mysql | grep -v deploy | awk '{print $1}' | tail -1 # exec and check hosts google.com and kubernetes.default.svc.cluster.local # ping has been removed! # Destroy all... oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep net-tools | awk '{print $1}' | xargs -n1 oc --config=/etc/origin/master/admin.kubeconfig delete pod -n net || true # --allow-missing-images has been seen to be needed very occasionally. # Sometimes terminating containers don't go away quickly. oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep ^net-tools | grep -v deploy | awk '{print $3}' | grep -v Terminating | head -1 # Check on deployment oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep ^net-tools | grep -w deploy | awk '{print $3}' | grep -v Terminating | head -1 # If deploy has errored... # Try and rollout latest, ensure it's been cancelled and roll out again. # For debug/info purposes. # pause to allow resolve to work ok oc --config=/etc/origin/master/admin.kubeconfig get pods -n net | grep net-tools | grep -v deploy | awk '{print $1}' | tail -1 oc --config=/etc/origin/master/admin.kubeconfig -n net exec -ti -- /bin/sh -c 'host google.com' oc --config=/etc/origin/master/admin.kubeconfig -n net-tools exec -ti -- /bin/sh -c 'host -s' #test_list also includes: AggregatedLogging MetricsApiProxy NetworkCheck # Sometimes terminating containers don't go away quickly. oc --config=/etc/origin/master/admin.kubeconfig get pods -n | grep ^ - | grep -v deploy | awk '{print $3}' | grep -v Terminating | head -1 # Check on deployment oc --config=/etc/origin/master/admin.kubeconfig get pods -n | grep ^ - | grep -w deploy | awk '{print $3}' | grep -v Terminating | head -1 # If deploy has errored... # Try and rollout latest, ensure it's been cancelled and roll out again. # For debug/info purposes.
1.956636
2
src/data_preparation/LA_data_preparation.py
devership16/INF553-YelpProject
1
6617564
<reponame>devership16/INF553-YelpProject<filename>src/data_preparation/LA_data_preparation.py """ Author: <NAME> """ import pandas as pd import numpy as np from src.data_preparation.input_data_schema import LosAngelesGovtDataSchema from src.data_schema.feature_names import FeatureNames from src.utils.inputOutput_utils import csvWriter """ rename the column feature names as per schema, drop the columns not in schema, create new columns in schema with Null values """ def prepare_la_data_features(df): la_object = LosAngelesGovtDataSchema() schema_object = FeatureNames() df.rename(columns={la_object.COL_NAME: schema_object.COL_NAME}, inplace=True) df[schema_object.COL_LOCATION_NAME] = "Null" df[schema_object.COL_CATEGORY_NAME] = "Null" df.rename(columns={la_object.COL_SITE_ADDRESS: schema_object.COL_ADDRESS}, inplace=True) df.rename(columns={la_object.COL_SITE_CITY: schema_object.COL_CITY}, inplace=True) df.rename(columns={la_object.COL_SITE_STATE: schema_object.COL_STATE}, inplace=True) df.rename(columns={la_object.COL_SITE_ZIP: schema_object.COL_ZIP}, inplace=True) df[schema_object.COL_VIOLATIONS] = "Null" df[schema_object.COL_CURRENT_DEMERITS] = "Null" df[schema_object.COL_INSPECTION_DEMERITS] = "Null" df.rename(columns={la_object.COl_SCORE: schema_object.COL_CURRENT_SCORE}, inplace=True) df[schema_object.COL_INSPECTION_SCORE] = "Null" df.rename(columns={la_object.COL_GRADE: schema_object.COL_CURRENT_GRADE}, inplace=True) df[schema_object.COL_INSPECTION_GRADE] = "Null" df[schema_object.COL_BUSINESS_ID] = "Null" df[schema_object.COL_NEIGHBORHOOD] = "Null" df.rename(columns={la_object.COl_RATING: schema_object.COL_RATING}, inplace=True) df.rename(columns={la_object.COL_REVIEW: schema_object.COL_REVIEW_COUNT}, inplace=True) df[schema_object.COL_ACCEPTS_INSURANCE] = "Null" df[schema_object.COL_AGES_ALLOWED] = "Null" df.rename(columns={la_object.COl_ALCOHOL: schema_object.COL_ALCOHOL}, inplace=True) df.rename(columns={la_object.COl_AMBIENCE: schema_object.COL_AMBIENCE}, inplace=True) df[schema_object.COL_BYOB] = "Null" df[schema_object.COL_BYOB_Corkage] = "Null" df[schema_object.COL_BEST_NIGHTS] = "Null" df.rename(columns={la_object.COl_BIKE_PARKING: schema_object.COL_BIKE_PARKING}, inplace=True) df[schema_object.COL_BUSINESS_ACCEPTS_BITCOIN] = "Null" df.rename(columns={la_object.COl_ACCEPT_CREDIT_CARD: schema_object.COL_BUSINESS_ACCEPTS_CREDITCARDS}, inplace=True) df[schema_object.COL_BUSINESS_PARKING] = "Null" df[schema_object.COL_BYAPPOINTMENTONLY] = "Null" df.rename(columns={la_object.COL_CATERS: schema_object.COL_CATERS}, inplace=True) df[schema_object.COL_COAT_CHECK] = "Null" df[schema_object.COL_CORKAGE] = "Null" df[schema_object.COL_DIETARY_RESTRICTIONS] = "Null" df[schema_object.COL_DOGS_ALLOWED] = "Null" df[schema_object.COL_DRIVE_THRU] = "Null" df[schema_object.COL_GOOD_FOR_DANCING] = "Null" df.rename(columns={la_object.COL_GOOD_FOR_KIDS: schema_object.COL_GOOD_FOR_KIDS}, inplace=True) df[schema_object.COL_GOOD_FOR_MEAL] = "Null" df[schema_object.COL_HAIR_SPECIALIZES_IN] = "Null" df[schema_object.COL_HAPPY_HOUR] = "Null" df.rename(columns={la_object.COL_HAS_TV: schema_object.COL_HAS_TV}, inplace=True) df[schema_object.COL_MUSIC] = "Null" df.rename(columns={la_object.COl_NOISE_LEVEL: schema_object.COL_NOISE_LEVEL}, inplace=True) df[schema_object.COL_OPEN_24_HOURS] = "Null" df.rename(columns={la_object.COL_OUTDOOR_SEATING: schema_object.COL_OUTDOOR_SEATING}, inplace=True) df.rename(columns={la_object.COL_ATTIRE: schema_object.COL_RESTAURANTS_ATTIRE}, inplace=True) df[schema_object.COL_RESTAURANTS_COUNTER_SERVICE] = "Null" df.rename(columns={la_object.COl_DELIVERY: schema_object.COL_RESTAURANTS_DELIVERY}, inplace=True) df.rename(columns={la_object.COL_GOOD_FOR_GROUPS: schema_object.COL_RESTAURANTS_GOOD_FOR_GROUPS}, inplace=True) df.rename(columns={la_object.COl_PRICE: schema_object.COL_RESTAURANTS_PRICE_RANGE2}, inplace=True) df.rename(columns={la_object.COl_TAKES_RESERVATIONS: schema_object.COL_RESTAURANTS_RESERVATIONS}, inplace=True) df[schema_object.COL_RESTAURANTS_TABLE_SERVICE] = "Null" df.rename(columns={la_object.COl_TAKEOUT: schema_object.COL_RESTAURANTS_TAKEOUT}, inplace=True) df[schema_object.COL_SMOKING] = "Null" df[schema_object.COL_WHEELCHAIR_ACCESSIBLE] = "Null" df.rename(columns={la_object.COL_WIFI: schema_object.COL_WIFI}, inplace=True) df.rename(columns={la_object.COl_ACCEPT_APPLE_PAY: schema_object.COL_ACCEPTS_APPLE_PAY}, inplace=True) df[schema_object.COL_ACCEPTS_GOOGLE_PAY] = "Null" df.rename(columns={la_object.COL_GENDER_NEUTRAL_RESTROOMS: schema_object.COL_GENDER_NEUTRAL_RESTROOMS}, inplace=True) df.rename(columns={la_object.COl_GOOD_FOR: schema_object.COL_GOOD_FOR}, inplace=True) df.rename(columns={la_object.COL_GOOD_FOR_WORKING: schema_object.COL_GOOD_FOR_WORKING}, inplace=True) df[schema_object.COL_HAS_GLUTEN_FREE_OPTIONS] = "Null" df[schema_object.COL_HAS_POOL_TABLE] = "Null" df[schema_object.COL_LIKED_BY_VEGANS] = "Null" df[schema_object.COL_LIKED_BY_VEGETARIANS] = "Null" df[schema_object.COL_OFFERS_MILITARY_DISCOUNT] = "Null" df[schema_object.COL_WAITER_SERVICE] = "Null" df.drop([la_object.COL_ACTIVITY_DATE], axis=1, inplace=True) df.drop([la_object.COL_RECORD_ID], axis=1, inplace=True) df.drop([la_object.COL_ADDRESS], axis=1, inplace=True) df.drop([la_object.COL_PROGRAM_ELEMENT_CODE], axis=1, inplace=True) df.drop([la_object.COL_PROGRAM_ELEMENT_CODE_DESCRIPTION], axis=1, inplace=True) df.drop([la_object.COL_SERVICE_DESCRIPTION], axis=1, inplace=True) df.drop([la_object.COL_ROW_ID], axis=1, inplace=True) df.drop([la_object.COL_VIOLATION_CODE], axis=1, inplace=True) df.drop([la_object.COL_VIOLATION_CODE_DESCRIPTION], axis=1, inplace=True) df.drop([la_object.COl_POINTS], axis=1, inplace=True) df.drop([la_object.COl_COUNT], axis=1, inplace=True) df.drop([la_object.COl_PHONE], axis=1, inplace=True) df.drop([la_object.COl_PARKING], axis=1, inplace=True) return df if __name__ == '__main__': input_la_dataset_file = '../../resources/dataset/LA_dataset_v1.csv' output_dataset_file = '../../resources/dataset/LA_dataset_v2.csv' input_df = pd.read_csv(input_la_dataset_file) # prepare features input_df = prepare_la_data_features(input_df) # fill empty values as Null input_df = input_df.replace(np.nan, 'Null') csvWriter(output_dataset_file, input_df)
""" Author: <NAME> """ import pandas as pd import numpy as np from src.data_preparation.input_data_schema import LosAngelesGovtDataSchema from src.data_schema.feature_names import FeatureNames from src.utils.inputOutput_utils import csvWriter """ rename the column feature names as per schema, drop the columns not in schema, create new columns in schema with Null values """ def prepare_la_data_features(df): la_object = LosAngelesGovtDataSchema() schema_object = FeatureNames() df.rename(columns={la_object.COL_NAME: schema_object.COL_NAME}, inplace=True) df[schema_object.COL_LOCATION_NAME] = "Null" df[schema_object.COL_CATEGORY_NAME] = "Null" df.rename(columns={la_object.COL_SITE_ADDRESS: schema_object.COL_ADDRESS}, inplace=True) df.rename(columns={la_object.COL_SITE_CITY: schema_object.COL_CITY}, inplace=True) df.rename(columns={la_object.COL_SITE_STATE: schema_object.COL_STATE}, inplace=True) df.rename(columns={la_object.COL_SITE_ZIP: schema_object.COL_ZIP}, inplace=True) df[schema_object.COL_VIOLATIONS] = "Null" df[schema_object.COL_CURRENT_DEMERITS] = "Null" df[schema_object.COL_INSPECTION_DEMERITS] = "Null" df.rename(columns={la_object.COl_SCORE: schema_object.COL_CURRENT_SCORE}, inplace=True) df[schema_object.COL_INSPECTION_SCORE] = "Null" df.rename(columns={la_object.COL_GRADE: schema_object.COL_CURRENT_GRADE}, inplace=True) df[schema_object.COL_INSPECTION_GRADE] = "Null" df[schema_object.COL_BUSINESS_ID] = "Null" df[schema_object.COL_NEIGHBORHOOD] = "Null" df.rename(columns={la_object.COl_RATING: schema_object.COL_RATING}, inplace=True) df.rename(columns={la_object.COL_REVIEW: schema_object.COL_REVIEW_COUNT}, inplace=True) df[schema_object.COL_ACCEPTS_INSURANCE] = "Null" df[schema_object.COL_AGES_ALLOWED] = "Null" df.rename(columns={la_object.COl_ALCOHOL: schema_object.COL_ALCOHOL}, inplace=True) df.rename(columns={la_object.COl_AMBIENCE: schema_object.COL_AMBIENCE}, inplace=True) df[schema_object.COL_BYOB] = "Null" df[schema_object.COL_BYOB_Corkage] = "Null" df[schema_object.COL_BEST_NIGHTS] = "Null" df.rename(columns={la_object.COl_BIKE_PARKING: schema_object.COL_BIKE_PARKING}, inplace=True) df[schema_object.COL_BUSINESS_ACCEPTS_BITCOIN] = "Null" df.rename(columns={la_object.COl_ACCEPT_CREDIT_CARD: schema_object.COL_BUSINESS_ACCEPTS_CREDITCARDS}, inplace=True) df[schema_object.COL_BUSINESS_PARKING] = "Null" df[schema_object.COL_BYAPPOINTMENTONLY] = "Null" df.rename(columns={la_object.COL_CATERS: schema_object.COL_CATERS}, inplace=True) df[schema_object.COL_COAT_CHECK] = "Null" df[schema_object.COL_CORKAGE] = "Null" df[schema_object.COL_DIETARY_RESTRICTIONS] = "Null" df[schema_object.COL_DOGS_ALLOWED] = "Null" df[schema_object.COL_DRIVE_THRU] = "Null" df[schema_object.COL_GOOD_FOR_DANCING] = "Null" df.rename(columns={la_object.COL_GOOD_FOR_KIDS: schema_object.COL_GOOD_FOR_KIDS}, inplace=True) df[schema_object.COL_GOOD_FOR_MEAL] = "Null" df[schema_object.COL_HAIR_SPECIALIZES_IN] = "Null" df[schema_object.COL_HAPPY_HOUR] = "Null" df.rename(columns={la_object.COL_HAS_TV: schema_object.COL_HAS_TV}, inplace=True) df[schema_object.COL_MUSIC] = "Null" df.rename(columns={la_object.COl_NOISE_LEVEL: schema_object.COL_NOISE_LEVEL}, inplace=True) df[schema_object.COL_OPEN_24_HOURS] = "Null" df.rename(columns={la_object.COL_OUTDOOR_SEATING: schema_object.COL_OUTDOOR_SEATING}, inplace=True) df.rename(columns={la_object.COL_ATTIRE: schema_object.COL_RESTAURANTS_ATTIRE}, inplace=True) df[schema_object.COL_RESTAURANTS_COUNTER_SERVICE] = "Null" df.rename(columns={la_object.COl_DELIVERY: schema_object.COL_RESTAURANTS_DELIVERY}, inplace=True) df.rename(columns={la_object.COL_GOOD_FOR_GROUPS: schema_object.COL_RESTAURANTS_GOOD_FOR_GROUPS}, inplace=True) df.rename(columns={la_object.COl_PRICE: schema_object.COL_RESTAURANTS_PRICE_RANGE2}, inplace=True) df.rename(columns={la_object.COl_TAKES_RESERVATIONS: schema_object.COL_RESTAURANTS_RESERVATIONS}, inplace=True) df[schema_object.COL_RESTAURANTS_TABLE_SERVICE] = "Null" df.rename(columns={la_object.COl_TAKEOUT: schema_object.COL_RESTAURANTS_TAKEOUT}, inplace=True) df[schema_object.COL_SMOKING] = "Null" df[schema_object.COL_WHEELCHAIR_ACCESSIBLE] = "Null" df.rename(columns={la_object.COL_WIFI: schema_object.COL_WIFI}, inplace=True) df.rename(columns={la_object.COl_ACCEPT_APPLE_PAY: schema_object.COL_ACCEPTS_APPLE_PAY}, inplace=True) df[schema_object.COL_ACCEPTS_GOOGLE_PAY] = "Null" df.rename(columns={la_object.COL_GENDER_NEUTRAL_RESTROOMS: schema_object.COL_GENDER_NEUTRAL_RESTROOMS}, inplace=True) df.rename(columns={la_object.COl_GOOD_FOR: schema_object.COL_GOOD_FOR}, inplace=True) df.rename(columns={la_object.COL_GOOD_FOR_WORKING: schema_object.COL_GOOD_FOR_WORKING}, inplace=True) df[schema_object.COL_HAS_GLUTEN_FREE_OPTIONS] = "Null" df[schema_object.COL_HAS_POOL_TABLE] = "Null" df[schema_object.COL_LIKED_BY_VEGANS] = "Null" df[schema_object.COL_LIKED_BY_VEGETARIANS] = "Null" df[schema_object.COL_OFFERS_MILITARY_DISCOUNT] = "Null" df[schema_object.COL_WAITER_SERVICE] = "Null" df.drop([la_object.COL_ACTIVITY_DATE], axis=1, inplace=True) df.drop([la_object.COL_RECORD_ID], axis=1, inplace=True) df.drop([la_object.COL_ADDRESS], axis=1, inplace=True) df.drop([la_object.COL_PROGRAM_ELEMENT_CODE], axis=1, inplace=True) df.drop([la_object.COL_PROGRAM_ELEMENT_CODE_DESCRIPTION], axis=1, inplace=True) df.drop([la_object.COL_SERVICE_DESCRIPTION], axis=1, inplace=True) df.drop([la_object.COL_ROW_ID], axis=1, inplace=True) df.drop([la_object.COL_VIOLATION_CODE], axis=1, inplace=True) df.drop([la_object.COL_VIOLATION_CODE_DESCRIPTION], axis=1, inplace=True) df.drop([la_object.COl_POINTS], axis=1, inplace=True) df.drop([la_object.COl_COUNT], axis=1, inplace=True) df.drop([la_object.COl_PHONE], axis=1, inplace=True) df.drop([la_object.COl_PARKING], axis=1, inplace=True) return df if __name__ == '__main__': input_la_dataset_file = '../../resources/dataset/LA_dataset_v1.csv' output_dataset_file = '../../resources/dataset/LA_dataset_v2.csv' input_df = pd.read_csv(input_la_dataset_file) # prepare features input_df = prepare_la_data_features(input_df) # fill empty values as Null input_df = input_df.replace(np.nan, 'Null') csvWriter(output_dataset_file, input_df)
en
0.676235
Author: <NAME> rename the column feature names as per schema, drop the columns not in schema, create new columns in schema with Null values # prepare features # fill empty values as Null
2.862465
3
pddm/spinup/algos/tf1/memb/core.py
RamiSketcher/pddm
0
6617565
<reponame>RamiSketcher/pddm import numpy as np import tensorflow as tf EPS = 1e-8 def placeholder(dim=None): return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,)) def placeholders(*args): return [placeholder(dim) for dim in args] def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None): for h in hidden_sizes[:-1]: x = tf.layers.dense(x, units=h, activation=activation) return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation) def get_vars(scope): return [x for x in tf.global_variables() if scope in x.name] def count_vars(scope): v = get_vars(scope) return sum([np.prod(var.shape.as_list()) for var in v]) def gaussian_likelihood(x, mu, log_std): pre_sum = -0.5 * (((x-mu)/(tf.exp(log_std)+EPS))**2 + 2*log_std + np.log(2*np.pi)) return tf.reduce_sum(pre_sum, axis=1) def clip_but_pass_gradient(x, l=-1., u=1.): clip_up = tf.cast(x > u, tf.float32) clip_low = tf.cast(x < l, tf.float32) return x + tf.stop_gradient((u - x)*clip_up + (l - x)*clip_low) LOG_STD_MAX = 2 LOG_STD_MIN = -20 """ The basic structure of stochastic policy agent """ def mlp_gaussian_policy(x, a, hidden_sizes, activation, output_activation): act_dim = a.shape.as_list()[-1] net = mlp(x, list(hidden_sizes), activation, activation) mu = tf.layers.dense(net, act_dim, activation=output_activation) log_std = tf.layers.dense(net, act_dim, activation=tf.tanh) log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1) ## std = tf.exp(log_std) pi = mu + tf.random_normal(tf.shape(mu)) * std logp_pi = gaussian_likelihood(pi, mu, log_std) return mu, pi, logp_pi def apply_squashing_func(mu, pi, logp_pi): mu = tf.tanh(mu) pi = tf.tanh(pi) # To avoid evil machine precision error, strictly clip 1-pi**2 to [0,1] range. logp_pi -= tf.reduce_sum(tf.log(clip_but_pass_gradient(1 - pi**2, l=0, u=1) + 1e-6), axis=1) ## return mu, pi, logp_pi """ Actor-Critics """ def mlp_actor_critic(x, a, hidden_sizes=(256,256), activation=tf.nn.relu, output_activation=None, policy=mlp_gaussian_policy, action_space=None): # policy with tf.variable_scope('pi'): mu, pi, logp_pi = policy(x, a, hidden_sizes, activation, output_activation) mu, pi, logp_pi = apply_squashing_func(mu, pi, logp_pi) # make sure actions are in correct range action_scale = action_space.high[0] mu *= action_scale pi *= action_scale # create value functions, Q_phi(s,a) , Qpi_phi(s,pi(a|s)) and V_psi(s), for ## Jv(psi) = Expt_st~D[0.5*( V_psi(st) - Expt_at~pi[Qpi_phi(st,pi(at|st))-logpi(at|st)] )^2] -->eq#5 ## Jq(phi) = Expt_(st,at)~D[0.5 ( Q_phi(st,at) - rt - lamda*V_psi(st+1) )^2] -->eq#6 # value(s) = NN(x, unit: [hid_list]+1, act, out_act): value_function_mlp = lambda x : tf.squeeze(mlp(x, list(hidden_sizes)+[1], activation, None), axis=1) with tf.variable_scope('q1'): q1 = value_function_mlp(tf.concat([x,a], axis=-1)) with tf.variable_scope('q1', reuse=True): q1_pi = value_function_mlp(tf.concat([x,pi], axis=-1)) with tf.variable_scope('q2'): q2 = value_function_mlp(tf.concat([x,a], axis=-1)) with tf.variable_scope('q2', reuse=True): q2_pi = value_function_mlp(tf.concat([x,pi], axis=-1)) with tf.variable_scope('v'): v = value_function_mlp(x) # If V'(st+1) for eq#8 return mu, pi, logp_pi, q1, q2, q1_pi, q2_pi, v """ Reward and transition dynamic model construction, the hidden_size for each model could be adjusted in each subnetworks. """ def reward_dynamic_model(x, a, pi, hidden_sizes=(256,256), activation=tf.nn.relu, action_space=None): value_function_mlp = lambda x : tf.squeeze(mlp(x, list(hidden_sizes)+[1], activation, None), axis=1) with tf.variable_scope('dm'): transition= dynamic_model(x,a) # st+1 = f(st,at) with tf.variable_scope('dm',reuse=True): transition_pi = dynamic_model(x,pi) # st+1_pi = f_pi(st,pi(at|st)) with tf.variable_scope('rm'): r_rm = reward_model(x,a) # r(st,at) with tf.variable_scope('rm',reuse=True): r_rm_pi = reward_model(x,pi) # r_pi(st,pi(at|st)) with tf.variable_scope('v', reuse=True): v_prime = value_function_mlp(transition_pi) return transition, r_rm, transition_pi ,r_rm_pi, v_prime def dynamic_model(x, a, hidden_sizes=(256,128), activation=tf.nn.relu, output_activation=None): state_dim = x.shape.as_list()[-1] # Count the # of elements in tensor x x = tf.concat([x,a],-1) # State-Action input (st,at), concat(arrays, axis) for h in hidden_sizes: x = tf.layers.dense(x, units=h, activation=activation) # at ~ pi(at|st) = N(mu,std) mu = tf.layers.dense(x, units=state_dim, activation=output_activation) log_std = tf.layers.dense(x, state_dim, activation=tf.tanh) log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1) std = tf.exp(log_std) transition = mu + tf.random_normal(tf.shape(mu)) * std # st+1 = N(mu,std) # Why not learning delta = st+1 - st ? return transition def reward_model(x, a, hidden_sizes=(256,128), activation=tf.nn.relu, output_activation=None): x = tf.concat([x,a],-1) for h in hidden_sizes: x = tf.layers.dense(x, units=h, activation=activation) mu = tf.layers.dense(x, units=1, activation=output_activation) log_std = tf.layers.dense(x, 1, activation=tf.tanh) log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1) std = tf.exp(log_std) reward = mu + tf.random_normal(tf.shape(mu)) * std return reward
import numpy as np import tensorflow as tf EPS = 1e-8 def placeholder(dim=None): return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,)) def placeholders(*args): return [placeholder(dim) for dim in args] def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None): for h in hidden_sizes[:-1]: x = tf.layers.dense(x, units=h, activation=activation) return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation) def get_vars(scope): return [x for x in tf.global_variables() if scope in x.name] def count_vars(scope): v = get_vars(scope) return sum([np.prod(var.shape.as_list()) for var in v]) def gaussian_likelihood(x, mu, log_std): pre_sum = -0.5 * (((x-mu)/(tf.exp(log_std)+EPS))**2 + 2*log_std + np.log(2*np.pi)) return tf.reduce_sum(pre_sum, axis=1) def clip_but_pass_gradient(x, l=-1., u=1.): clip_up = tf.cast(x > u, tf.float32) clip_low = tf.cast(x < l, tf.float32) return x + tf.stop_gradient((u - x)*clip_up + (l - x)*clip_low) LOG_STD_MAX = 2 LOG_STD_MIN = -20 """ The basic structure of stochastic policy agent """ def mlp_gaussian_policy(x, a, hidden_sizes, activation, output_activation): act_dim = a.shape.as_list()[-1] net = mlp(x, list(hidden_sizes), activation, activation) mu = tf.layers.dense(net, act_dim, activation=output_activation) log_std = tf.layers.dense(net, act_dim, activation=tf.tanh) log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1) ## std = tf.exp(log_std) pi = mu + tf.random_normal(tf.shape(mu)) * std logp_pi = gaussian_likelihood(pi, mu, log_std) return mu, pi, logp_pi def apply_squashing_func(mu, pi, logp_pi): mu = tf.tanh(mu) pi = tf.tanh(pi) # To avoid evil machine precision error, strictly clip 1-pi**2 to [0,1] range. logp_pi -= tf.reduce_sum(tf.log(clip_but_pass_gradient(1 - pi**2, l=0, u=1) + 1e-6), axis=1) ## return mu, pi, logp_pi """ Actor-Critics """ def mlp_actor_critic(x, a, hidden_sizes=(256,256), activation=tf.nn.relu, output_activation=None, policy=mlp_gaussian_policy, action_space=None): # policy with tf.variable_scope('pi'): mu, pi, logp_pi = policy(x, a, hidden_sizes, activation, output_activation) mu, pi, logp_pi = apply_squashing_func(mu, pi, logp_pi) # make sure actions are in correct range action_scale = action_space.high[0] mu *= action_scale pi *= action_scale # create value functions, Q_phi(s,a) , Qpi_phi(s,pi(a|s)) and V_psi(s), for ## Jv(psi) = Expt_st~D[0.5*( V_psi(st) - Expt_at~pi[Qpi_phi(st,pi(at|st))-logpi(at|st)] )^2] -->eq#5 ## Jq(phi) = Expt_(st,at)~D[0.5 ( Q_phi(st,at) - rt - lamda*V_psi(st+1) )^2] -->eq#6 # value(s) = NN(x, unit: [hid_list]+1, act, out_act): value_function_mlp = lambda x : tf.squeeze(mlp(x, list(hidden_sizes)+[1], activation, None), axis=1) with tf.variable_scope('q1'): q1 = value_function_mlp(tf.concat([x,a], axis=-1)) with tf.variable_scope('q1', reuse=True): q1_pi = value_function_mlp(tf.concat([x,pi], axis=-1)) with tf.variable_scope('q2'): q2 = value_function_mlp(tf.concat([x,a], axis=-1)) with tf.variable_scope('q2', reuse=True): q2_pi = value_function_mlp(tf.concat([x,pi], axis=-1)) with tf.variable_scope('v'): v = value_function_mlp(x) # If V'(st+1) for eq#8 return mu, pi, logp_pi, q1, q2, q1_pi, q2_pi, v """ Reward and transition dynamic model construction, the hidden_size for each model could be adjusted in each subnetworks. """ def reward_dynamic_model(x, a, pi, hidden_sizes=(256,256), activation=tf.nn.relu, action_space=None): value_function_mlp = lambda x : tf.squeeze(mlp(x, list(hidden_sizes)+[1], activation, None), axis=1) with tf.variable_scope('dm'): transition= dynamic_model(x,a) # st+1 = f(st,at) with tf.variable_scope('dm',reuse=True): transition_pi = dynamic_model(x,pi) # st+1_pi = f_pi(st,pi(at|st)) with tf.variable_scope('rm'): r_rm = reward_model(x,a) # r(st,at) with tf.variable_scope('rm',reuse=True): r_rm_pi = reward_model(x,pi) # r_pi(st,pi(at|st)) with tf.variable_scope('v', reuse=True): v_prime = value_function_mlp(transition_pi) return transition, r_rm, transition_pi ,r_rm_pi, v_prime def dynamic_model(x, a, hidden_sizes=(256,128), activation=tf.nn.relu, output_activation=None): state_dim = x.shape.as_list()[-1] # Count the # of elements in tensor x x = tf.concat([x,a],-1) # State-Action input (st,at), concat(arrays, axis) for h in hidden_sizes: x = tf.layers.dense(x, units=h, activation=activation) # at ~ pi(at|st) = N(mu,std) mu = tf.layers.dense(x, units=state_dim, activation=output_activation) log_std = tf.layers.dense(x, state_dim, activation=tf.tanh) log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1) std = tf.exp(log_std) transition = mu + tf.random_normal(tf.shape(mu)) * std # st+1 = N(mu,std) # Why not learning delta = st+1 - st ? return transition def reward_model(x, a, hidden_sizes=(256,128), activation=tf.nn.relu, output_activation=None): x = tf.concat([x,a],-1) for h in hidden_sizes: x = tf.layers.dense(x, units=h, activation=activation) mu = tf.layers.dense(x, units=1, activation=output_activation) log_std = tf.layers.dense(x, 1, activation=tf.tanh) log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1) std = tf.exp(log_std) reward = mu + tf.random_normal(tf.shape(mu)) * std return reward
en
0.574167
The basic structure of stochastic policy agent ## # To avoid evil machine precision error, strictly clip 1-pi**2 to [0,1] range. ## Actor-Critics # policy # make sure actions are in correct range # create value functions, Q_phi(s,a) , Qpi_phi(s,pi(a|s)) and V_psi(s), for ## Jv(psi) = Expt_st~D[0.5*( V_psi(st) - Expt_at~pi[Qpi_phi(st,pi(at|st))-logpi(at|st)] )^2] -->eq#5 ## Jq(phi) = Expt_(st,at)~D[0.5 ( Q_phi(st,at) - rt - lamda*V_psi(st+1) )^2] -->eq#6 # value(s) = NN(x, unit: [hid_list]+1, act, out_act): # If V'(st+1) for eq#8 Reward and transition dynamic model construction, the hidden_size for each model could be adjusted in each subnetworks. # st+1 = f(st,at) # st+1_pi = f_pi(st,pi(at|st)) # r(st,at) # r_pi(st,pi(at|st)) # Count the # of elements in tensor x # State-Action input (st,at), concat(arrays, axis) # at ~ pi(at|st) = N(mu,std) # st+1 = N(mu,std) # Why not learning delta = st+1 - st ?
2.34578
2
sktime/series_as_features/tests/test_all_series_as_features_estimators.py
alwinw/sktime
1
6617566
#!/usr/bin/env python3 -u # -*- coding: utf-8 -*- # copyright: sktime developers, BSD-3-Clause License (see LICENSE file) __author__ = ["<NAME>"] __all__ = [] import numpy as np import pandas as pd import pytest from sktime.tests._config import EXCLUDED_ESTIMATORS from sktime.tests._config import NON_STATE_CHANGING_METHODS from sktime.utils import all_estimators from sktime.utils._testing import _construct_instance from sktime.utils._testing import _make_args CLASSIFIERS = [ e[1] for e in all_estimators(estimator_type="classifier") if e[0] not in EXCLUDED_ESTIMATORS ] REGRESSORS = [ e[1] for e in all_estimators(estimator_type="regressor") if e[0] not in EXCLUDED_ESTIMATORS ] SERIES_AS_FEATURES_TRANSFORMERS = [ e[1] for e in all_estimators(estimator_type="series_as_features_transformer") if e[0] not in EXCLUDED_ESTIMATORS ] ALL_SERIES_AS_FEATURES_ESTIMATORS = ( CLASSIFIERS + REGRESSORS + SERIES_AS_FEATURES_TRANSFORMERS ) N_CLASSES = 3 ACCEPTED_OUTPUT_TYPES = (np.ndarray, pd.Series) @pytest.mark.parametrize("Estimator", ALL_SERIES_AS_FEATURES_ESTIMATORS) def test_series_as_features_X_3d_numpy(Estimator): estimator = _construct_instance(Estimator) fit_args = _make_args(estimator, "fit", return_numpy=True) estimator.fit(*fit_args) for method in NON_STATE_CHANGING_METHODS: if hasattr(estimator, method): # try if methods can handle 3d numpy input data try: args = _make_args(estimator, method, return_numpy=True) getattr(estimator, method)(*args) # if not, check if they raise the appropriate error message except ValueError as e: error_msg = "This method requires X to be a nested pd.DataFrame" assert error_msg in str(e), ( f"{estimator.__class__.__name__} does " f"not handle 3d numpy input data " f"correctly" ) @pytest.mark.parametrize("Estimator", CLASSIFIERS + REGRESSORS) def test_series_as_features_multivariate_input(Estimator): # check if multivariate input is correctly handled n_columns = 2 error_msg = ( f"X must be univariate " f"with X.shape[1] == 1, but found: " f"X.shape[1] == {n_columns}." ) estimator = _construct_instance(Estimator) X_train, y_train = _make_args(estimator, "fit", n_columns=n_columns) # check if estimator can handle multivariate data try: estimator.fit(X_train, y_train) # TODO include series-as-features transformers for method in ("predict", "predict_proba"): X = _make_args(estimator, method, n_columns=n_columns)[0] getattr(estimator, method)(X) # if not, check if error with appropriate message is raised except ValueError as e: assert error_msg in str(e), ( f"{estimator.__class__.__name__} does not handle multivariate " f"data and does not raise an appropriate error when multivariate " f"data is passed" ) @pytest.mark.parametrize("Estimator", CLASSIFIERS) def test_classifier_output(Estimator): estimator = _construct_instance(Estimator) X_train, y_train = _make_args(estimator, "fit", n_classes=N_CLASSES) estimator.fit(X_train, y_train) X = _make_args(estimator, "predict")[0] # check predict y_pred = estimator.predict(X) assert isinstance(y_pred, ACCEPTED_OUTPUT_TYPES) assert y_pred.shape == (X.shape[0],) assert np.all(np.isin(np.unique(y_pred), np.unique(y_train))) # check predict proba if hasattr(estimator, "predict_proba"): y_proba = estimator.predict_proba(X) assert isinstance(y_proba, ACCEPTED_OUTPUT_TYPES) assert y_proba.shape == (X.shape[0], N_CLASSES) np.testing.assert_allclose(y_proba.sum(axis=1), 1) assert np.all(np.isin(np.unique(y_pred), np.unique(y_train))) @pytest.mark.parametrize("Estimator", REGRESSORS) def test_regressor_output(Estimator): estimator = _construct_instance(Estimator) X_train, y_train = _make_args(estimator, "fit") estimator.fit(X_train, y_train) X = _make_args(estimator, "predict")[0] # check predict y_pred = estimator.predict(X) assert isinstance(y_pred, ACCEPTED_OUTPUT_TYPES) assert y_pred.shape == (X.shape[0],) assert np.issubdtype(y_pred.dtype, np.floating)
#!/usr/bin/env python3 -u # -*- coding: utf-8 -*- # copyright: sktime developers, BSD-3-Clause License (see LICENSE file) __author__ = ["<NAME>"] __all__ = [] import numpy as np import pandas as pd import pytest from sktime.tests._config import EXCLUDED_ESTIMATORS from sktime.tests._config import NON_STATE_CHANGING_METHODS from sktime.utils import all_estimators from sktime.utils._testing import _construct_instance from sktime.utils._testing import _make_args CLASSIFIERS = [ e[1] for e in all_estimators(estimator_type="classifier") if e[0] not in EXCLUDED_ESTIMATORS ] REGRESSORS = [ e[1] for e in all_estimators(estimator_type="regressor") if e[0] not in EXCLUDED_ESTIMATORS ] SERIES_AS_FEATURES_TRANSFORMERS = [ e[1] for e in all_estimators(estimator_type="series_as_features_transformer") if e[0] not in EXCLUDED_ESTIMATORS ] ALL_SERIES_AS_FEATURES_ESTIMATORS = ( CLASSIFIERS + REGRESSORS + SERIES_AS_FEATURES_TRANSFORMERS ) N_CLASSES = 3 ACCEPTED_OUTPUT_TYPES = (np.ndarray, pd.Series) @pytest.mark.parametrize("Estimator", ALL_SERIES_AS_FEATURES_ESTIMATORS) def test_series_as_features_X_3d_numpy(Estimator): estimator = _construct_instance(Estimator) fit_args = _make_args(estimator, "fit", return_numpy=True) estimator.fit(*fit_args) for method in NON_STATE_CHANGING_METHODS: if hasattr(estimator, method): # try if methods can handle 3d numpy input data try: args = _make_args(estimator, method, return_numpy=True) getattr(estimator, method)(*args) # if not, check if they raise the appropriate error message except ValueError as e: error_msg = "This method requires X to be a nested pd.DataFrame" assert error_msg in str(e), ( f"{estimator.__class__.__name__} does " f"not handle 3d numpy input data " f"correctly" ) @pytest.mark.parametrize("Estimator", CLASSIFIERS + REGRESSORS) def test_series_as_features_multivariate_input(Estimator): # check if multivariate input is correctly handled n_columns = 2 error_msg = ( f"X must be univariate " f"with X.shape[1] == 1, but found: " f"X.shape[1] == {n_columns}." ) estimator = _construct_instance(Estimator) X_train, y_train = _make_args(estimator, "fit", n_columns=n_columns) # check if estimator can handle multivariate data try: estimator.fit(X_train, y_train) # TODO include series-as-features transformers for method in ("predict", "predict_proba"): X = _make_args(estimator, method, n_columns=n_columns)[0] getattr(estimator, method)(X) # if not, check if error with appropriate message is raised except ValueError as e: assert error_msg in str(e), ( f"{estimator.__class__.__name__} does not handle multivariate " f"data and does not raise an appropriate error when multivariate " f"data is passed" ) @pytest.mark.parametrize("Estimator", CLASSIFIERS) def test_classifier_output(Estimator): estimator = _construct_instance(Estimator) X_train, y_train = _make_args(estimator, "fit", n_classes=N_CLASSES) estimator.fit(X_train, y_train) X = _make_args(estimator, "predict")[0] # check predict y_pred = estimator.predict(X) assert isinstance(y_pred, ACCEPTED_OUTPUT_TYPES) assert y_pred.shape == (X.shape[0],) assert np.all(np.isin(np.unique(y_pred), np.unique(y_train))) # check predict proba if hasattr(estimator, "predict_proba"): y_proba = estimator.predict_proba(X) assert isinstance(y_proba, ACCEPTED_OUTPUT_TYPES) assert y_proba.shape == (X.shape[0], N_CLASSES) np.testing.assert_allclose(y_proba.sum(axis=1), 1) assert np.all(np.isin(np.unique(y_pred), np.unique(y_train))) @pytest.mark.parametrize("Estimator", REGRESSORS) def test_regressor_output(Estimator): estimator = _construct_instance(Estimator) X_train, y_train = _make_args(estimator, "fit") estimator.fit(X_train, y_train) X = _make_args(estimator, "predict")[0] # check predict y_pred = estimator.predict(X) assert isinstance(y_pred, ACCEPTED_OUTPUT_TYPES) assert y_pred.shape == (X.shape[0],) assert np.issubdtype(y_pred.dtype, np.floating)
en
0.468176
#!/usr/bin/env python3 -u # -*- coding: utf-8 -*- # copyright: sktime developers, BSD-3-Clause License (see LICENSE file) # try if methods can handle 3d numpy input data # if not, check if they raise the appropriate error message # check if multivariate input is correctly handled # check if estimator can handle multivariate data # TODO include series-as-features transformers # if not, check if error with appropriate message is raised # check predict # check predict proba # check predict
2.043736
2
host/run-mig.py
wingel/sds7102
47
6617567
#! /usr/bin/python import time import random from sds import SDS, decode_mig_status def main(): sds = SDS('sds') # sds.capture(16) if 1: print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) if 1: print "Reset" sds.write_soc_reg(0x200, 1) print "ctrl 0x%08x" % sds.read_soc_reg(0x200) sds.write_soc_reg(0x200, 0) print "ctrl 0x%08x" % sds.read_soc_reg(0x200) time.sleep(0.1) print "ctrl 0x%08x" % sds.read_soc_reg(0x200) print decode_mig_status(sds.read_soc_reg(0x211)) n = 3 o = 10 if 1: print "write to FIFO" for i in range(n): sds.write_soc_reg(0x218, 0xf00f0000 + i) time.sleep(0.1) print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) print "write to DDR" sds.write_soc_reg(0x210, o | ((n-1)<<24) | (0<<30)) time.sleep(0.1) print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) sds.write_ddr(20, [ 0xdeadbeef, 0xfeedf00f ]) n = 31 o = 0 if 1: print "read from DDR" sds.write_soc_reg(0x210, o | ((n-1)<<24) | (1<<30)) time.sleep(0.1) print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) print "read from FIFO" for i in range(n): print "rd %2d -> 0x%08x" % (i, sds.read_soc_reg(0x218)) time.sleep(0.1) print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) data = sds.read_ddr(0, 32) for i in range(len(data)): print "%2d -> 0x%08x" % (i, data[i]) n = 0x100 o = 0x100 wr_data = [ random.randrange(1<<32) for _ in range(n) ] sds.write_ddr(o, wr_data) rd_data = sds.read_ddr(o, n) assert all(wr_data == rd_data) if __name__ == '__main__': main()
#! /usr/bin/python import time import random from sds import SDS, decode_mig_status def main(): sds = SDS('sds') # sds.capture(16) if 1: print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) if 1: print "Reset" sds.write_soc_reg(0x200, 1) print "ctrl 0x%08x" % sds.read_soc_reg(0x200) sds.write_soc_reg(0x200, 0) print "ctrl 0x%08x" % sds.read_soc_reg(0x200) time.sleep(0.1) print "ctrl 0x%08x" % sds.read_soc_reg(0x200) print decode_mig_status(sds.read_soc_reg(0x211)) n = 3 o = 10 if 1: print "write to FIFO" for i in range(n): sds.write_soc_reg(0x218, 0xf00f0000 + i) time.sleep(0.1) print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) print "write to DDR" sds.write_soc_reg(0x210, o | ((n-1)<<24) | (0<<30)) time.sleep(0.1) print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) sds.write_ddr(20, [ 0xdeadbeef, 0xfeedf00f ]) n = 31 o = 0 if 1: print "read from DDR" sds.write_soc_reg(0x210, o | ((n-1)<<24) | (1<<30)) time.sleep(0.1) print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) print "read from FIFO" for i in range(n): print "rd %2d -> 0x%08x" % (i, sds.read_soc_reg(0x218)) time.sleep(0.1) print "counts 0x%08x" % sds.read_soc_reg(0x212) decode_mig_status(sds.read_soc_reg(0x211)) data = sds.read_ddr(0, 32) for i in range(len(data)): print "%2d -> 0x%08x" % (i, data[i]) n = 0x100 o = 0x100 wr_data = [ random.randrange(1<<32) for _ in range(n) ] sds.write_ddr(o, wr_data) rd_data = sds.read_ddr(o, n) assert all(wr_data == rd_data) if __name__ == '__main__': main()
en
0.416013
#! /usr/bin/python # sds.capture(16)
2.340981
2
archived/functions/higgs/SVM_model_avg_reduce_ec.py
DS3Lab/LambdaML
23
6617568
import time import numpy as np import pickle import torch from torch.autograd import Variable from torch.utils.data.sampler import SubsetRandomSampler from archived.elasticache.Memcached import hset_object from archived.elasticache.Memcached import memcached_init from archived.s3.get_object import get_object from archived.s3 import put_object from archived.sync import merge_w_b_grads, put_merged_w_b_grads, get_merged_w_b_grads from archived.old_model.SVM import SVM from data_loader.libsvm_dataset import DenseDatasetWithLines # lambda setting local_dir = "/tmp" w_prefix = "w_" b_prefix = "b_" # algorithm setting num_features = 30 num_classes = 2 learning_rate = 0.01 batch_size = 1000 num_epochs = 2 validation_ratio = .2 shuffle_dataset = True random_seed = 42 def handler(event, context): start_time = time.time() bucket = event['bucket_name'] worker_index = event['rank'] num_workers = event['num_workers'] key = event['file'] merged_bucket = event['merged_bucket'] num_epochs = event['num_epochs'] learning_rate = event['learning_rate'] batch_size = event['batch_size'] elasti_location = event['elasticache'] endpoint = memcached_init(elasti_location) print('bucket = {}'.format(bucket)) print("file = {}".format(key)) print('merged bucket = {}'.format(merged_bucket)) print('number of workers = {}'.format(num_workers)) print('worker index = {}'.format(worker_index)) print('num epochs = {}'.format(num_epochs)) print('learning rate = {}'.format(learning_rate)) print("batch size = {}".format(batch_size)) # read file from s3 file = get_object(bucket, key).read().decode('utf-8').split("\n") print("read data cost {} s".format(time.time() - start_time)) parse_start = time.time() dataset = DenseDatasetWithLines(file, num_features) print("parse data cost {} s".format(time.time() - parse_start)) preprocess_start = time.time() # Creating data indices for training and validation splits: dataset_size = len(dataset) indices = list(range(dataset_size)) split = int(np.floor(validation_ratio * dataset_size)) if shuffle_dataset: np.random.seed(random_seed) np.random.shuffle(indices) train_indices, val_indices = indices[split:], indices[:split] # Creating PT data samplers and loaders: train_sampler = SubsetRandomSampler(train_indices) valid_sampler = SubsetRandomSampler(val_indices) train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler) validation_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=valid_sampler) print("preprocess data cost {} s, dataset size = {}" .format(time.time() - preprocess_start, dataset_size)) model = SVM(num_features, num_classes) # Loss and Optimizer # Softmax is internally computed. # Set parameters to be updated. criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) train_loss = [] test_loss = [] test_acc = [] epoch_time = 0 # Training the Model epoch_start = time.time() for epoch in range(num_epochs): tmp_train = 0 for batch_index, (items, labels) in enumerate(train_loader): print("------worker {} epoch {} batch {}------".format(worker_index, epoch, batch_index)) batch_start = time.time() items = Variable(items.view(-1, num_features)) labels = Variable(labels) # Forward + Backward + Optimize optimizer.zero_grad() outputs = model(items) loss = criterion(outputs, labels) loss.backward() optimizer.step() if (batch_index + 1) % 1 == 0: print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f' % (epoch + 1, num_epochs, batch_index + 1, len(train_indices) / batch_size, loss.data)) tmp_train = tmp_train + loss.item() train_loss.append(tmp_train / (batch_index + 1)) # sync model w_model = model.linear.weight.data.numpy() b_model = model.linear.bias.data.numpy() epoch_time = time.time() - epoch_start + epoch_time # synchronization starts from that every worker writes their model after this epoch sync_start = time.time() hset_object(endpoint, merged_bucket, w_prefix + str(worker_index), w_model.tobytes()) hset_object(endpoint, merged_bucket, b_prefix + str(worker_index), b_model.tobytes()) tmp_write_local_epoch_time = time.time() - sync_start print("write local model cost = {}".format(tmp_write_local_epoch_time)) # merge gradients among files file_postfix = "{}".format(epoch) if worker_index == 0: merge_start = time.time() w_model_merge, b_model_merge = merge_w_b_grads(endpoint, merged_bucket, num_workers, w_model.dtype, w_model.shape, b_model.shape, w_prefix, b_prefix) put_merged_w_b_grads(endpoint, merged_bucket, w_model_merge, b_model_merge, file_postfix, w_prefix, b_prefix) else: w_model_merge, b_model_merge = get_merged_w_b_grads(endpoint, merged_bucket, file_postfix, w_model.dtype, w_model.shape, b_model.shape, w_prefix, b_prefix) model.linear.weight.data = Variable(torch.from_numpy(w_model_merge)) model.linear.bias.data = Variable(torch.from_numpy(b_model_merge)) tmp_sync_time = time.time() - sync_start print("synchronization cost {} s".format(tmp_sync_time)) # Test the Model correct = 0 total = 0 count = 0 tmp_test = 0 for items, labels in validation_loader: items = Variable(items.view(-1, num_features)) outputs = model(items) loss = criterion(outputs, labels) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum() tmp_test = tmp_test + loss.item() count = count + 1 # print('Accuracy of the model on the %d test samples: %d %%' % (len(val_indices), 100 * correct / total)) test_acc.append(100 * correct / total) test_loss.append(tmp_test / count) epoch_start = time.time() end_time = time.time() print("elapsed time = {} s".format(end_time - start_time)) loss_record = [test_loss, test_acc, train_loss, epoch_time] put_object("model-average-loss", "average_loss{}".format(worker_index), pickle.dumps(loss_record))
import time import numpy as np import pickle import torch from torch.autograd import Variable from torch.utils.data.sampler import SubsetRandomSampler from archived.elasticache.Memcached import hset_object from archived.elasticache.Memcached import memcached_init from archived.s3.get_object import get_object from archived.s3 import put_object from archived.sync import merge_w_b_grads, put_merged_w_b_grads, get_merged_w_b_grads from archived.old_model.SVM import SVM from data_loader.libsvm_dataset import DenseDatasetWithLines # lambda setting local_dir = "/tmp" w_prefix = "w_" b_prefix = "b_" # algorithm setting num_features = 30 num_classes = 2 learning_rate = 0.01 batch_size = 1000 num_epochs = 2 validation_ratio = .2 shuffle_dataset = True random_seed = 42 def handler(event, context): start_time = time.time() bucket = event['bucket_name'] worker_index = event['rank'] num_workers = event['num_workers'] key = event['file'] merged_bucket = event['merged_bucket'] num_epochs = event['num_epochs'] learning_rate = event['learning_rate'] batch_size = event['batch_size'] elasti_location = event['elasticache'] endpoint = memcached_init(elasti_location) print('bucket = {}'.format(bucket)) print("file = {}".format(key)) print('merged bucket = {}'.format(merged_bucket)) print('number of workers = {}'.format(num_workers)) print('worker index = {}'.format(worker_index)) print('num epochs = {}'.format(num_epochs)) print('learning rate = {}'.format(learning_rate)) print("batch size = {}".format(batch_size)) # read file from s3 file = get_object(bucket, key).read().decode('utf-8').split("\n") print("read data cost {} s".format(time.time() - start_time)) parse_start = time.time() dataset = DenseDatasetWithLines(file, num_features) print("parse data cost {} s".format(time.time() - parse_start)) preprocess_start = time.time() # Creating data indices for training and validation splits: dataset_size = len(dataset) indices = list(range(dataset_size)) split = int(np.floor(validation_ratio * dataset_size)) if shuffle_dataset: np.random.seed(random_seed) np.random.shuffle(indices) train_indices, val_indices = indices[split:], indices[:split] # Creating PT data samplers and loaders: train_sampler = SubsetRandomSampler(train_indices) valid_sampler = SubsetRandomSampler(val_indices) train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler) validation_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=valid_sampler) print("preprocess data cost {} s, dataset size = {}" .format(time.time() - preprocess_start, dataset_size)) model = SVM(num_features, num_classes) # Loss and Optimizer # Softmax is internally computed. # Set parameters to be updated. criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) train_loss = [] test_loss = [] test_acc = [] epoch_time = 0 # Training the Model epoch_start = time.time() for epoch in range(num_epochs): tmp_train = 0 for batch_index, (items, labels) in enumerate(train_loader): print("------worker {} epoch {} batch {}------".format(worker_index, epoch, batch_index)) batch_start = time.time() items = Variable(items.view(-1, num_features)) labels = Variable(labels) # Forward + Backward + Optimize optimizer.zero_grad() outputs = model(items) loss = criterion(outputs, labels) loss.backward() optimizer.step() if (batch_index + 1) % 1 == 0: print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f' % (epoch + 1, num_epochs, batch_index + 1, len(train_indices) / batch_size, loss.data)) tmp_train = tmp_train + loss.item() train_loss.append(tmp_train / (batch_index + 1)) # sync model w_model = model.linear.weight.data.numpy() b_model = model.linear.bias.data.numpy() epoch_time = time.time() - epoch_start + epoch_time # synchronization starts from that every worker writes their model after this epoch sync_start = time.time() hset_object(endpoint, merged_bucket, w_prefix + str(worker_index), w_model.tobytes()) hset_object(endpoint, merged_bucket, b_prefix + str(worker_index), b_model.tobytes()) tmp_write_local_epoch_time = time.time() - sync_start print("write local model cost = {}".format(tmp_write_local_epoch_time)) # merge gradients among files file_postfix = "{}".format(epoch) if worker_index == 0: merge_start = time.time() w_model_merge, b_model_merge = merge_w_b_grads(endpoint, merged_bucket, num_workers, w_model.dtype, w_model.shape, b_model.shape, w_prefix, b_prefix) put_merged_w_b_grads(endpoint, merged_bucket, w_model_merge, b_model_merge, file_postfix, w_prefix, b_prefix) else: w_model_merge, b_model_merge = get_merged_w_b_grads(endpoint, merged_bucket, file_postfix, w_model.dtype, w_model.shape, b_model.shape, w_prefix, b_prefix) model.linear.weight.data = Variable(torch.from_numpy(w_model_merge)) model.linear.bias.data = Variable(torch.from_numpy(b_model_merge)) tmp_sync_time = time.time() - sync_start print("synchronization cost {} s".format(tmp_sync_time)) # Test the Model correct = 0 total = 0 count = 0 tmp_test = 0 for items, labels in validation_loader: items = Variable(items.view(-1, num_features)) outputs = model(items) loss = criterion(outputs, labels) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum() tmp_test = tmp_test + loss.item() count = count + 1 # print('Accuracy of the model on the %d test samples: %d %%' % (len(val_indices), 100 * correct / total)) test_acc.append(100 * correct / total) test_loss.append(tmp_test / count) epoch_start = time.time() end_time = time.time() print("elapsed time = {} s".format(end_time - start_time)) loss_record = [test_loss, test_acc, train_loss, epoch_time] put_object("model-average-loss", "average_loss{}".format(worker_index), pickle.dumps(loss_record))
en
0.827049
# lambda setting # algorithm setting # read file from s3 # Creating data indices for training and validation splits: # Creating PT data samplers and loaders: # Loss and Optimizer # Softmax is internally computed. # Set parameters to be updated. # Training the Model # Forward + Backward + Optimize # sync model # synchronization starts from that every worker writes their model after this epoch # merge gradients among files # Test the Model # print('Accuracy of the model on the %d test samples: %d %%' % (len(val_indices), 100 * correct / total))
2.031502
2
utils/__init__.py
1mplex/segmentation_image_augmentation
15
6617569
from utils.show import * from utils.read import read from utils.pil import pil2np, np2pil from utils.colors import generate_colors from utils.mask2mask import * from utils.list_files import list_files from utils.get_data_list import get_img_mask_list, get_images_list from utils.pack_images import get_pack_coords from utils.check_is_image import check_is_image from utils.format_image import format_image from utils.pad import pad from utils.csv import * from utils.write import write from utils.colors import human2machine_mask from utils.supervisely2sia import supervisely2sia
from utils.show import * from utils.read import read from utils.pil import pil2np, np2pil from utils.colors import generate_colors from utils.mask2mask import * from utils.list_files import list_files from utils.get_data_list import get_img_mask_list, get_images_list from utils.pack_images import get_pack_coords from utils.check_is_image import check_is_image from utils.format_image import format_image from utils.pad import pad from utils.csv import * from utils.write import write from utils.colors import human2machine_mask from utils.supervisely2sia import supervisely2sia
none
1
1.435193
1
topika/common.py
sphuber/topika
0
6617570
from __future__ import absolute_import import contextlib import enum import tornado.ioloop from tornado import gen import tornado.gen from tornado.gen import coroutine, Return from . import exceptions from . import tools @enum.unique class ConfirmationTypes(enum.Enum): ACK = 'ack' NACK = 'nack' def ensure_coroutine(func_or_coro): if gen.is_coroutine_function(func_or_coro): return func_or_coro else: @coroutine def coro(*args, **kwargs): raise Return(func_or_coro(*args, **kwargs)) return coro class _CallbackWrapper(object): """ Take a callback that expects the first argument to be the object corresponding to the source of the message and replace it with a proxy object that the function will receive as the source object instead. This wrapper will also correctly call the callback in the correct way whether it is a coroutine or plain function. """ def __init__(self, source_proxy, callback): self._proxy = source_proxy self._callback = ensure_coroutine(callback) @gen.coroutine def __call__(self, unused_source, *args, **kwargs): result = yield self._callback(self._proxy, *args, **kwargs) raise Return(result) def future_with_timeout(loop, timeout, future=None): """ Create a future with a timeout :param loop: The tornado event loop :param timeout: The timeout in seconds :param future: An optional existing future :return: """ loop = loop if loop else tornado.ioloop.IOLoop.current() f = future or tools.create_future(loop=loop) def on_timeout(): if f.done(): return f.set_exception(tornado.ioloop.TimeoutError) if timeout: handle = loop.call_later(timeout, on_timeout) def on_result(_unused_future): # Cancel the timeout if the future is done loop.remove_timeout(handle) f.add_done_callback(on_result) return f class FutureStore(object): """ Borrowed from aio_pika (https://github.com/mosquito/aio-pika) """ __slots__ = "__collection", "__loop", "__parent_store" def __init__(self, loop, parent_store=None): self.__parent_store = parent_store self.__collection = set() self.__loop = loop if loop else tornado.ioloop.IOLoop.current() def _on_future_done(self, future): if future in self.__collection: self.__collection.remove(future) @staticmethod def _reject_future(future, exception): if future.done(): return future.set_exception(exception) def add(self, future): if self.__parent_store: self.__parent_store.add(future) self.__collection.add(future) future.add_done_callback(self._on_future_done) def reject_all(self, exception): for future in list(self.__collection): self.__collection.remove(future) self.__loop.add_callback(self._reject_future, future, exception) @staticmethod def _on_timeout(future): if future.done(): return future.set_exception(tornado.ioloop.TimeoutError) def create_future(self, timeout=None): future = future_with_timeout(self.__loop, timeout) self.add(future) if self.__parent_store: self.__parent_store.add(future) return future def create_child(self): return FutureStore(self.__loop, parent_store=self) @contextlib.contextmanager def pending_future(self, timeout=None): future = None try: future = self.create_future(timeout) yield future finally: # Cleanup if future in self.__collection: self.__collection.remove(future) class BaseChannel(object): __slots__ = ('_channel_futures', 'loop', '_futures', '_closing') def __init__(self, loop, future_store): """ :type loop: :class:`tornado.ioloop.IOLoop` :type future_store: :class:`FutureStore` """ self.loop = loop self._futures = future_store self._closing = tools.create_future(loop=self.loop) @property def is_closed(self): return self._closing.done() def _create_future(self, timeout=None): f = self._futures.create_future(timeout) return f @staticmethod def _ensure_channel_is_open(func): @contextlib.wraps(func) @tools.coroutine def wrap(self, *args, **kwargs): if self.is_closed: raise RuntimeError("The channel is closed") raise gen.Return((yield func(self, *args, **kwargs))) return wrap def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, getattr(self, 'name', id(self)))
from __future__ import absolute_import import contextlib import enum import tornado.ioloop from tornado import gen import tornado.gen from tornado.gen import coroutine, Return from . import exceptions from . import tools @enum.unique class ConfirmationTypes(enum.Enum): ACK = 'ack' NACK = 'nack' def ensure_coroutine(func_or_coro): if gen.is_coroutine_function(func_or_coro): return func_or_coro else: @coroutine def coro(*args, **kwargs): raise Return(func_or_coro(*args, **kwargs)) return coro class _CallbackWrapper(object): """ Take a callback that expects the first argument to be the object corresponding to the source of the message and replace it with a proxy object that the function will receive as the source object instead. This wrapper will also correctly call the callback in the correct way whether it is a coroutine or plain function. """ def __init__(self, source_proxy, callback): self._proxy = source_proxy self._callback = ensure_coroutine(callback) @gen.coroutine def __call__(self, unused_source, *args, **kwargs): result = yield self._callback(self._proxy, *args, **kwargs) raise Return(result) def future_with_timeout(loop, timeout, future=None): """ Create a future with a timeout :param loop: The tornado event loop :param timeout: The timeout in seconds :param future: An optional existing future :return: """ loop = loop if loop else tornado.ioloop.IOLoop.current() f = future or tools.create_future(loop=loop) def on_timeout(): if f.done(): return f.set_exception(tornado.ioloop.TimeoutError) if timeout: handle = loop.call_later(timeout, on_timeout) def on_result(_unused_future): # Cancel the timeout if the future is done loop.remove_timeout(handle) f.add_done_callback(on_result) return f class FutureStore(object): """ Borrowed from aio_pika (https://github.com/mosquito/aio-pika) """ __slots__ = "__collection", "__loop", "__parent_store" def __init__(self, loop, parent_store=None): self.__parent_store = parent_store self.__collection = set() self.__loop = loop if loop else tornado.ioloop.IOLoop.current() def _on_future_done(self, future): if future in self.__collection: self.__collection.remove(future) @staticmethod def _reject_future(future, exception): if future.done(): return future.set_exception(exception) def add(self, future): if self.__parent_store: self.__parent_store.add(future) self.__collection.add(future) future.add_done_callback(self._on_future_done) def reject_all(self, exception): for future in list(self.__collection): self.__collection.remove(future) self.__loop.add_callback(self._reject_future, future, exception) @staticmethod def _on_timeout(future): if future.done(): return future.set_exception(tornado.ioloop.TimeoutError) def create_future(self, timeout=None): future = future_with_timeout(self.__loop, timeout) self.add(future) if self.__parent_store: self.__parent_store.add(future) return future def create_child(self): return FutureStore(self.__loop, parent_store=self) @contextlib.contextmanager def pending_future(self, timeout=None): future = None try: future = self.create_future(timeout) yield future finally: # Cleanup if future in self.__collection: self.__collection.remove(future) class BaseChannel(object): __slots__ = ('_channel_futures', 'loop', '_futures', '_closing') def __init__(self, loop, future_store): """ :type loop: :class:`tornado.ioloop.IOLoop` :type future_store: :class:`FutureStore` """ self.loop = loop self._futures = future_store self._closing = tools.create_future(loop=self.loop) @property def is_closed(self): return self._closing.done() def _create_future(self, timeout=None): f = self._futures.create_future(timeout) return f @staticmethod def _ensure_channel_is_open(func): @contextlib.wraps(func) @tools.coroutine def wrap(self, *args, **kwargs): if self.is_closed: raise RuntimeError("The channel is closed") raise gen.Return((yield func(self, *args, **kwargs))) return wrap def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, getattr(self, 'name', id(self)))
en
0.643722
Take a callback that expects the first argument to be the object corresponding to the source of the message and replace it with a proxy object that the function will receive as the source object instead. This wrapper will also correctly call the callback in the correct way whether it is a coroutine or plain function. Create a future with a timeout :param loop: The tornado event loop :param timeout: The timeout in seconds :param future: An optional existing future :return: # Cancel the timeout if the future is done Borrowed from aio_pika (https://github.com/mosquito/aio-pika) # Cleanup :type loop: :class:`tornado.ioloop.IOLoop` :type future_store: :class:`FutureStore`
2.271132
2
models/windows_information_protection.py
MIchaelMainer/msgraph-v10-models-python
1
6617571
# -*- coding: utf-8 -*- ''' # Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information. # # This file was generated and any changes will be overwritten. ''' from __future__ import unicode_literals from ..model.windows_information_protection_enforcement_level import WindowsInformationProtectionEnforcementLevel from ..model.windows_information_protection_resource_collection import WindowsInformationProtectionResourceCollection from ..model.windows_information_protection_data_recovery_certificate import WindowsInformationProtectionDataRecoveryCertificate from ..model.windows_information_protection_app import WindowsInformationProtectionApp from ..model.windows_information_protection_proxied_domain_collection import WindowsInformationProtectionProxiedDomainCollection from ..model.windows_information_protection_ip_range_collection import WindowsInformationProtectionIPRangeCollection from ..model.windows_information_protection_app_locker_file import WindowsInformationProtectionAppLockerFile from ..model.targeted_managed_app_policy_assignment import TargetedManagedAppPolicyAssignment from ..one_drive_object_base import OneDriveObjectBase class WindowsInformationProtection(OneDriveObjectBase): def __init__(self, prop_dict={}): self._prop_dict = prop_dict @property def enforcement_level(self): """ Gets and sets the enforcementLevel Returns: :class:`WindowsInformationProtectionEnforcementLevel<onedrivesdk.model.windows_information_protection_enforcement_level.WindowsInformationProtectionEnforcementLevel>`: The enforcementLevel """ if "enforcementLevel" in self._prop_dict: if isinstance(self._prop_dict["enforcementLevel"], OneDriveObjectBase): return self._prop_dict["enforcementLevel"] else : self._prop_dict["enforcementLevel"] = WindowsInformationProtectionEnforcementLevel(self._prop_dict["enforcementLevel"]) return self._prop_dict["enforcementLevel"] return None @enforcement_level.setter def enforcement_level(self, val): self._prop_dict["enforcementLevel"] = val @property def enterprise_domain(self): """ Gets and sets the enterpriseDomain Returns: str: The enterpriseDomain """ if "enterpriseDomain" in self._prop_dict: return self._prop_dict["enterpriseDomain"] else: return None @enterprise_domain.setter def enterprise_domain(self, val): self._prop_dict["enterpriseDomain"] = val @property def enterprise_protected_domain_names(self): """Gets and sets the enterpriseProtectedDomainNames Returns: :class:`EnterpriseProtectedDomainNamesCollectionPage<onedrivesdk.request.enterprise_protected_domain_names_collection.EnterpriseProtectedDomainNamesCollectionPage>`: The enterpriseProtectedDomainNames """ if "enterpriseProtectedDomainNames" in self._prop_dict: return EnterpriseProtectedDomainNamesCollectionPage(self._prop_dict["enterpriseProtectedDomainNames"]) else: return None @property def protection_under_lock_config_required(self): """ Gets and sets the protectionUnderLockConfigRequired Returns: bool: The protectionUnderLockConfigRequired """ if "protectionUnderLockConfigRequired" in self._prop_dict: return self._prop_dict["protectionUnderLockConfigRequired"] else: return None @protection_under_lock_config_required.setter def protection_under_lock_config_required(self, val): self._prop_dict["protectionUnderLockConfigRequired"] = val @property def data_recovery_certificate(self): """ Gets and sets the dataRecoveryCertificate Returns: :class:`WindowsInformationProtectionDataRecoveryCertificate<onedrivesdk.model.windows_information_protection_data_recovery_certificate.WindowsInformationProtectionDataRecoveryCertificate>`: The dataRecoveryCertificate """ if "dataRecoveryCertificate" in self._prop_dict: if isinstance(self._prop_dict["dataRecoveryCertificate"], OneDriveObjectBase): return self._prop_dict["dataRecoveryCertificate"] else : self._prop_dict["dataRecoveryCertificate"] = WindowsInformationProtectionDataRecoveryCertificate(self._prop_dict["dataRecoveryCertificate"]) return self._prop_dict["dataRecoveryCertificate"] return None @data_recovery_certificate.setter def data_recovery_certificate(self, val): self._prop_dict["dataRecoveryCertificate"] = val @property def revoke_on_unenroll_disabled(self): """ Gets and sets the revokeOnUnenrollDisabled Returns: bool: The revokeOnUnenrollDisabled """ if "revokeOnUnenrollDisabled" in self._prop_dict: return self._prop_dict["revokeOnUnenrollDisabled"] else: return None @revoke_on_unenroll_disabled.setter def revoke_on_unenroll_disabled(self, val): self._prop_dict["revokeOnUnenrollDisabled"] = val @property def rights_management_services_template_id(self): """ Gets and sets the rightsManagementServicesTemplateId Returns: UUID: The rightsManagementServicesTemplateId """ if "rightsManagementServicesTemplateId" in self._prop_dict: return self._prop_dict["rightsManagementServicesTemplateId"] else: return None @rights_management_services_template_id.setter def rights_management_services_template_id(self, val): self._prop_dict["rightsManagementServicesTemplateId"] = val @property def azure_rights_management_services_allowed(self): """ Gets and sets the azureRightsManagementServicesAllowed Returns: bool: The azureRightsManagementServicesAllowed """ if "azureRightsManagementServicesAllowed" in self._prop_dict: return self._prop_dict["azureRightsManagementServicesAllowed"] else: return None @azure_rights_management_services_allowed.setter def azure_rights_management_services_allowed(self, val): self._prop_dict["azureRightsManagementServicesAllowed"] = val @property def icons_visible(self): """ Gets and sets the iconsVisible Returns: bool: The iconsVisible """ if "iconsVisible" in self._prop_dict: return self._prop_dict["iconsVisible"] else: return None @icons_visible.setter def icons_visible(self, val): self._prop_dict["iconsVisible"] = val @property def protected_apps(self): """Gets and sets the protectedApps Returns: :class:`ProtectedAppsCollectionPage<onedrivesdk.request.protected_apps_collection.ProtectedAppsCollectionPage>`: The protectedApps """ if "protectedApps" in self._prop_dict: return ProtectedAppsCollectionPage(self._prop_dict["protectedApps"]) else: return None @property def exempt_apps(self): """Gets and sets the exemptApps Returns: :class:`ExemptAppsCollectionPage<onedrivesdk.request.exempt_apps_collection.ExemptAppsCollectionPage>`: The exemptApps """ if "exemptApps" in self._prop_dict: return ExemptAppsCollectionPage(self._prop_dict["exemptApps"]) else: return None @property def enterprise_network_domain_names(self): """Gets and sets the enterpriseNetworkDomainNames Returns: :class:`EnterpriseNetworkDomainNamesCollectionPage<onedrivesdk.request.enterprise_network_domain_names_collection.EnterpriseNetworkDomainNamesCollectionPage>`: The enterpriseNetworkDomainNames """ if "enterpriseNetworkDomainNames" in self._prop_dict: return EnterpriseNetworkDomainNamesCollectionPage(self._prop_dict["enterpriseNetworkDomainNames"]) else: return None @property def enterprise_proxied_domains(self): """Gets and sets the enterpriseProxiedDomains Returns: :class:`EnterpriseProxiedDomainsCollectionPage<onedrivesdk.request.enterprise_proxied_domains_collection.EnterpriseProxiedDomainsCollectionPage>`: The enterpriseProxiedDomains """ if "enterpriseProxiedDomains" in self._prop_dict: return EnterpriseProxiedDomainsCollectionPage(self._prop_dict["enterpriseProxiedDomains"]) else: return None @property def enterprise_ip_ranges(self): """Gets and sets the enterpriseIPRanges Returns: :class:`EnterpriseIPRangesCollectionPage<onedrivesdk.request.enterprise_ip_ranges_collection.EnterpriseIPRangesCollectionPage>`: The enterpriseIPRanges """ if "enterpriseIPRanges" in self._prop_dict: return EnterpriseIPRangesCollectionPage(self._prop_dict["enterpriseIPRanges"]) else: return None @property def enterprise_ip_ranges_are_authoritative(self): """ Gets and sets the enterpriseIPRangesAreAuthoritative Returns: bool: The enterpriseIPRangesAreAuthoritative """ if "enterpriseIPRangesAreAuthoritative" in self._prop_dict: return self._prop_dict["enterpriseIPRangesAreAuthoritative"] else: return None @enterprise_ip_ranges_are_authoritative.setter def enterprise_ip_ranges_are_authoritative(self, val): self._prop_dict["enterpriseIPRangesAreAuthoritative"] = val @property def enterprise_proxy_servers(self): """Gets and sets the enterpriseProxyServers Returns: :class:`EnterpriseProxyServersCollectionPage<onedrivesdk.request.enterprise_proxy_servers_collection.EnterpriseProxyServersCollectionPage>`: The enterpriseProxyServers """ if "enterpriseProxyServers" in self._prop_dict: return EnterpriseProxyServersCollectionPage(self._prop_dict["enterpriseProxyServers"]) else: return None @property def enterprise_internal_proxy_servers(self): """Gets and sets the enterpriseInternalProxyServers Returns: :class:`EnterpriseInternalProxyServersCollectionPage<onedrivesdk.request.enterprise_internal_proxy_servers_collection.EnterpriseInternalProxyServersCollectionPage>`: The enterpriseInternalProxyServers """ if "enterpriseInternalProxyServers" in self._prop_dict: return EnterpriseInternalProxyServersCollectionPage(self._prop_dict["enterpriseInternalProxyServers"]) else: return None @property def enterprise_proxy_servers_are_authoritative(self): """ Gets and sets the enterpriseProxyServersAreAuthoritative Returns: bool: The enterpriseProxyServersAreAuthoritative """ if "enterpriseProxyServersAreAuthoritative" in self._prop_dict: return self._prop_dict["enterpriseProxyServersAreAuthoritative"] else: return None @enterprise_proxy_servers_are_authoritative.setter def enterprise_proxy_servers_are_authoritative(self, val): self._prop_dict["enterpriseProxyServersAreAuthoritative"] = val @property def neutral_domain_resources(self): """Gets and sets the neutralDomainResources Returns: :class:`NeutralDomainResourcesCollectionPage<onedrivesdk.request.neutral_domain_resources_collection.NeutralDomainResourcesCollectionPage>`: The neutralDomainResources """ if "neutralDomainResources" in self._prop_dict: return NeutralDomainResourcesCollectionPage(self._prop_dict["neutralDomainResources"]) else: return None @property def indexing_encrypted_stores_or_items_blocked(self): """ Gets and sets the indexingEncryptedStoresOrItemsBlocked Returns: bool: The indexingEncryptedStoresOrItemsBlocked """ if "indexingEncryptedStoresOrItemsBlocked" in self._prop_dict: return self._prop_dict["indexingEncryptedStoresOrItemsBlocked"] else: return None @indexing_encrypted_stores_or_items_blocked.setter def indexing_encrypted_stores_or_items_blocked(self, val): self._prop_dict["indexingEncryptedStoresOrItemsBlocked"] = val @property def smb_auto_encrypted_file_extensions(self): """Gets and sets the smbAutoEncryptedFileExtensions Returns: :class:`SmbAutoEncryptedFileExtensionsCollectionPage<onedrivesdk.request.smb_auto_encrypted_file_extensions_collection.SmbAutoEncryptedFileExtensionsCollectionPage>`: The smbAutoEncryptedFileExtensions """ if "smbAutoEncryptedFileExtensions" in self._prop_dict: return SmbAutoEncryptedFileExtensionsCollectionPage(self._prop_dict["smbAutoEncryptedFileExtensions"]) else: return None @property def is_assigned(self): """ Gets and sets the isAssigned Returns: bool: The isAssigned """ if "isAssigned" in self._prop_dict: return self._prop_dict["isAssigned"] else: return None @is_assigned.setter def is_assigned(self, val): self._prop_dict["isAssigned"] = val @property def protected_app_locker_files(self): """Gets and sets the protectedAppLockerFiles Returns: :class:`ProtectedAppLockerFilesCollectionPage<onedrivesdk.request.protected_app_locker_files_collection.ProtectedAppLockerFilesCollectionPage>`: The protectedAppLockerFiles """ if "protectedAppLockerFiles" in self._prop_dict: return ProtectedAppLockerFilesCollectionPage(self._prop_dict["protectedAppLockerFiles"]) else: return None @property def exempt_app_locker_files(self): """Gets and sets the exemptAppLockerFiles Returns: :class:`ExemptAppLockerFilesCollectionPage<onedrivesdk.request.exempt_app_locker_files_collection.ExemptAppLockerFilesCollectionPage>`: The exemptAppLockerFiles """ if "exemptAppLockerFiles" in self._prop_dict: return ExemptAppLockerFilesCollectionPage(self._prop_dict["exemptAppLockerFiles"]) else: return None @property def assignments(self): """Gets and sets the assignments Returns: :class:`AssignmentsCollectionPage<onedrivesdk.request.assignments_collection.AssignmentsCollectionPage>`: The assignments """ if "assignments" in self._prop_dict: return AssignmentsCollectionPage(self._prop_dict["assignments"]) else: return None
# -*- coding: utf-8 -*- ''' # Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information. # # This file was generated and any changes will be overwritten. ''' from __future__ import unicode_literals from ..model.windows_information_protection_enforcement_level import WindowsInformationProtectionEnforcementLevel from ..model.windows_information_protection_resource_collection import WindowsInformationProtectionResourceCollection from ..model.windows_information_protection_data_recovery_certificate import WindowsInformationProtectionDataRecoveryCertificate from ..model.windows_information_protection_app import WindowsInformationProtectionApp from ..model.windows_information_protection_proxied_domain_collection import WindowsInformationProtectionProxiedDomainCollection from ..model.windows_information_protection_ip_range_collection import WindowsInformationProtectionIPRangeCollection from ..model.windows_information_protection_app_locker_file import WindowsInformationProtectionAppLockerFile from ..model.targeted_managed_app_policy_assignment import TargetedManagedAppPolicyAssignment from ..one_drive_object_base import OneDriveObjectBase class WindowsInformationProtection(OneDriveObjectBase): def __init__(self, prop_dict={}): self._prop_dict = prop_dict @property def enforcement_level(self): """ Gets and sets the enforcementLevel Returns: :class:`WindowsInformationProtectionEnforcementLevel<onedrivesdk.model.windows_information_protection_enforcement_level.WindowsInformationProtectionEnforcementLevel>`: The enforcementLevel """ if "enforcementLevel" in self._prop_dict: if isinstance(self._prop_dict["enforcementLevel"], OneDriveObjectBase): return self._prop_dict["enforcementLevel"] else : self._prop_dict["enforcementLevel"] = WindowsInformationProtectionEnforcementLevel(self._prop_dict["enforcementLevel"]) return self._prop_dict["enforcementLevel"] return None @enforcement_level.setter def enforcement_level(self, val): self._prop_dict["enforcementLevel"] = val @property def enterprise_domain(self): """ Gets and sets the enterpriseDomain Returns: str: The enterpriseDomain """ if "enterpriseDomain" in self._prop_dict: return self._prop_dict["enterpriseDomain"] else: return None @enterprise_domain.setter def enterprise_domain(self, val): self._prop_dict["enterpriseDomain"] = val @property def enterprise_protected_domain_names(self): """Gets and sets the enterpriseProtectedDomainNames Returns: :class:`EnterpriseProtectedDomainNamesCollectionPage<onedrivesdk.request.enterprise_protected_domain_names_collection.EnterpriseProtectedDomainNamesCollectionPage>`: The enterpriseProtectedDomainNames """ if "enterpriseProtectedDomainNames" in self._prop_dict: return EnterpriseProtectedDomainNamesCollectionPage(self._prop_dict["enterpriseProtectedDomainNames"]) else: return None @property def protection_under_lock_config_required(self): """ Gets and sets the protectionUnderLockConfigRequired Returns: bool: The protectionUnderLockConfigRequired """ if "protectionUnderLockConfigRequired" in self._prop_dict: return self._prop_dict["protectionUnderLockConfigRequired"] else: return None @protection_under_lock_config_required.setter def protection_under_lock_config_required(self, val): self._prop_dict["protectionUnderLockConfigRequired"] = val @property def data_recovery_certificate(self): """ Gets and sets the dataRecoveryCertificate Returns: :class:`WindowsInformationProtectionDataRecoveryCertificate<onedrivesdk.model.windows_information_protection_data_recovery_certificate.WindowsInformationProtectionDataRecoveryCertificate>`: The dataRecoveryCertificate """ if "dataRecoveryCertificate" in self._prop_dict: if isinstance(self._prop_dict["dataRecoveryCertificate"], OneDriveObjectBase): return self._prop_dict["dataRecoveryCertificate"] else : self._prop_dict["dataRecoveryCertificate"] = WindowsInformationProtectionDataRecoveryCertificate(self._prop_dict["dataRecoveryCertificate"]) return self._prop_dict["dataRecoveryCertificate"] return None @data_recovery_certificate.setter def data_recovery_certificate(self, val): self._prop_dict["dataRecoveryCertificate"] = val @property def revoke_on_unenroll_disabled(self): """ Gets and sets the revokeOnUnenrollDisabled Returns: bool: The revokeOnUnenrollDisabled """ if "revokeOnUnenrollDisabled" in self._prop_dict: return self._prop_dict["revokeOnUnenrollDisabled"] else: return None @revoke_on_unenroll_disabled.setter def revoke_on_unenroll_disabled(self, val): self._prop_dict["revokeOnUnenrollDisabled"] = val @property def rights_management_services_template_id(self): """ Gets and sets the rightsManagementServicesTemplateId Returns: UUID: The rightsManagementServicesTemplateId """ if "rightsManagementServicesTemplateId" in self._prop_dict: return self._prop_dict["rightsManagementServicesTemplateId"] else: return None @rights_management_services_template_id.setter def rights_management_services_template_id(self, val): self._prop_dict["rightsManagementServicesTemplateId"] = val @property def azure_rights_management_services_allowed(self): """ Gets and sets the azureRightsManagementServicesAllowed Returns: bool: The azureRightsManagementServicesAllowed """ if "azureRightsManagementServicesAllowed" in self._prop_dict: return self._prop_dict["azureRightsManagementServicesAllowed"] else: return None @azure_rights_management_services_allowed.setter def azure_rights_management_services_allowed(self, val): self._prop_dict["azureRightsManagementServicesAllowed"] = val @property def icons_visible(self): """ Gets and sets the iconsVisible Returns: bool: The iconsVisible """ if "iconsVisible" in self._prop_dict: return self._prop_dict["iconsVisible"] else: return None @icons_visible.setter def icons_visible(self, val): self._prop_dict["iconsVisible"] = val @property def protected_apps(self): """Gets and sets the protectedApps Returns: :class:`ProtectedAppsCollectionPage<onedrivesdk.request.protected_apps_collection.ProtectedAppsCollectionPage>`: The protectedApps """ if "protectedApps" in self._prop_dict: return ProtectedAppsCollectionPage(self._prop_dict["protectedApps"]) else: return None @property def exempt_apps(self): """Gets and sets the exemptApps Returns: :class:`ExemptAppsCollectionPage<onedrivesdk.request.exempt_apps_collection.ExemptAppsCollectionPage>`: The exemptApps """ if "exemptApps" in self._prop_dict: return ExemptAppsCollectionPage(self._prop_dict["exemptApps"]) else: return None @property def enterprise_network_domain_names(self): """Gets and sets the enterpriseNetworkDomainNames Returns: :class:`EnterpriseNetworkDomainNamesCollectionPage<onedrivesdk.request.enterprise_network_domain_names_collection.EnterpriseNetworkDomainNamesCollectionPage>`: The enterpriseNetworkDomainNames """ if "enterpriseNetworkDomainNames" in self._prop_dict: return EnterpriseNetworkDomainNamesCollectionPage(self._prop_dict["enterpriseNetworkDomainNames"]) else: return None @property def enterprise_proxied_domains(self): """Gets and sets the enterpriseProxiedDomains Returns: :class:`EnterpriseProxiedDomainsCollectionPage<onedrivesdk.request.enterprise_proxied_domains_collection.EnterpriseProxiedDomainsCollectionPage>`: The enterpriseProxiedDomains """ if "enterpriseProxiedDomains" in self._prop_dict: return EnterpriseProxiedDomainsCollectionPage(self._prop_dict["enterpriseProxiedDomains"]) else: return None @property def enterprise_ip_ranges(self): """Gets and sets the enterpriseIPRanges Returns: :class:`EnterpriseIPRangesCollectionPage<onedrivesdk.request.enterprise_ip_ranges_collection.EnterpriseIPRangesCollectionPage>`: The enterpriseIPRanges """ if "enterpriseIPRanges" in self._prop_dict: return EnterpriseIPRangesCollectionPage(self._prop_dict["enterpriseIPRanges"]) else: return None @property def enterprise_ip_ranges_are_authoritative(self): """ Gets and sets the enterpriseIPRangesAreAuthoritative Returns: bool: The enterpriseIPRangesAreAuthoritative """ if "enterpriseIPRangesAreAuthoritative" in self._prop_dict: return self._prop_dict["enterpriseIPRangesAreAuthoritative"] else: return None @enterprise_ip_ranges_are_authoritative.setter def enterprise_ip_ranges_are_authoritative(self, val): self._prop_dict["enterpriseIPRangesAreAuthoritative"] = val @property def enterprise_proxy_servers(self): """Gets and sets the enterpriseProxyServers Returns: :class:`EnterpriseProxyServersCollectionPage<onedrivesdk.request.enterprise_proxy_servers_collection.EnterpriseProxyServersCollectionPage>`: The enterpriseProxyServers """ if "enterpriseProxyServers" in self._prop_dict: return EnterpriseProxyServersCollectionPage(self._prop_dict["enterpriseProxyServers"]) else: return None @property def enterprise_internal_proxy_servers(self): """Gets and sets the enterpriseInternalProxyServers Returns: :class:`EnterpriseInternalProxyServersCollectionPage<onedrivesdk.request.enterprise_internal_proxy_servers_collection.EnterpriseInternalProxyServersCollectionPage>`: The enterpriseInternalProxyServers """ if "enterpriseInternalProxyServers" in self._prop_dict: return EnterpriseInternalProxyServersCollectionPage(self._prop_dict["enterpriseInternalProxyServers"]) else: return None @property def enterprise_proxy_servers_are_authoritative(self): """ Gets and sets the enterpriseProxyServersAreAuthoritative Returns: bool: The enterpriseProxyServersAreAuthoritative """ if "enterpriseProxyServersAreAuthoritative" in self._prop_dict: return self._prop_dict["enterpriseProxyServersAreAuthoritative"] else: return None @enterprise_proxy_servers_are_authoritative.setter def enterprise_proxy_servers_are_authoritative(self, val): self._prop_dict["enterpriseProxyServersAreAuthoritative"] = val @property def neutral_domain_resources(self): """Gets and sets the neutralDomainResources Returns: :class:`NeutralDomainResourcesCollectionPage<onedrivesdk.request.neutral_domain_resources_collection.NeutralDomainResourcesCollectionPage>`: The neutralDomainResources """ if "neutralDomainResources" in self._prop_dict: return NeutralDomainResourcesCollectionPage(self._prop_dict["neutralDomainResources"]) else: return None @property def indexing_encrypted_stores_or_items_blocked(self): """ Gets and sets the indexingEncryptedStoresOrItemsBlocked Returns: bool: The indexingEncryptedStoresOrItemsBlocked """ if "indexingEncryptedStoresOrItemsBlocked" in self._prop_dict: return self._prop_dict["indexingEncryptedStoresOrItemsBlocked"] else: return None @indexing_encrypted_stores_or_items_blocked.setter def indexing_encrypted_stores_or_items_blocked(self, val): self._prop_dict["indexingEncryptedStoresOrItemsBlocked"] = val @property def smb_auto_encrypted_file_extensions(self): """Gets and sets the smbAutoEncryptedFileExtensions Returns: :class:`SmbAutoEncryptedFileExtensionsCollectionPage<onedrivesdk.request.smb_auto_encrypted_file_extensions_collection.SmbAutoEncryptedFileExtensionsCollectionPage>`: The smbAutoEncryptedFileExtensions """ if "smbAutoEncryptedFileExtensions" in self._prop_dict: return SmbAutoEncryptedFileExtensionsCollectionPage(self._prop_dict["smbAutoEncryptedFileExtensions"]) else: return None @property def is_assigned(self): """ Gets and sets the isAssigned Returns: bool: The isAssigned """ if "isAssigned" in self._prop_dict: return self._prop_dict["isAssigned"] else: return None @is_assigned.setter def is_assigned(self, val): self._prop_dict["isAssigned"] = val @property def protected_app_locker_files(self): """Gets and sets the protectedAppLockerFiles Returns: :class:`ProtectedAppLockerFilesCollectionPage<onedrivesdk.request.protected_app_locker_files_collection.ProtectedAppLockerFilesCollectionPage>`: The protectedAppLockerFiles """ if "protectedAppLockerFiles" in self._prop_dict: return ProtectedAppLockerFilesCollectionPage(self._prop_dict["protectedAppLockerFiles"]) else: return None @property def exempt_app_locker_files(self): """Gets and sets the exemptAppLockerFiles Returns: :class:`ExemptAppLockerFilesCollectionPage<onedrivesdk.request.exempt_app_locker_files_collection.ExemptAppLockerFilesCollectionPage>`: The exemptAppLockerFiles """ if "exemptAppLockerFiles" in self._prop_dict: return ExemptAppLockerFilesCollectionPage(self._prop_dict["exemptAppLockerFiles"]) else: return None @property def assignments(self): """Gets and sets the assignments Returns: :class:`AssignmentsCollectionPage<onedrivesdk.request.assignments_collection.AssignmentsCollectionPage>`: The assignments """ if "assignments" in self._prop_dict: return AssignmentsCollectionPage(self._prop_dict["assignments"]) else: return None
en
0.441349
# -*- coding: utf-8 -*- # Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information. # # This file was generated and any changes will be overwritten. Gets and sets the enforcementLevel Returns: :class:`WindowsInformationProtectionEnforcementLevel<onedrivesdk.model.windows_information_protection_enforcement_level.WindowsInformationProtectionEnforcementLevel>`: The enforcementLevel Gets and sets the enterpriseDomain Returns: str: The enterpriseDomain Gets and sets the enterpriseProtectedDomainNames Returns: :class:`EnterpriseProtectedDomainNamesCollectionPage<onedrivesdk.request.enterprise_protected_domain_names_collection.EnterpriseProtectedDomainNamesCollectionPage>`: The enterpriseProtectedDomainNames Gets and sets the protectionUnderLockConfigRequired Returns: bool: The protectionUnderLockConfigRequired Gets and sets the dataRecoveryCertificate Returns: :class:`WindowsInformationProtectionDataRecoveryCertificate<onedrivesdk.model.windows_information_protection_data_recovery_certificate.WindowsInformationProtectionDataRecoveryCertificate>`: The dataRecoveryCertificate Gets and sets the revokeOnUnenrollDisabled Returns: bool: The revokeOnUnenrollDisabled Gets and sets the rightsManagementServicesTemplateId Returns: UUID: The rightsManagementServicesTemplateId Gets and sets the azureRightsManagementServicesAllowed Returns: bool: The azureRightsManagementServicesAllowed Gets and sets the iconsVisible Returns: bool: The iconsVisible Gets and sets the protectedApps Returns: :class:`ProtectedAppsCollectionPage<onedrivesdk.request.protected_apps_collection.ProtectedAppsCollectionPage>`: The protectedApps Gets and sets the exemptApps Returns: :class:`ExemptAppsCollectionPage<onedrivesdk.request.exempt_apps_collection.ExemptAppsCollectionPage>`: The exemptApps Gets and sets the enterpriseNetworkDomainNames Returns: :class:`EnterpriseNetworkDomainNamesCollectionPage<onedrivesdk.request.enterprise_network_domain_names_collection.EnterpriseNetworkDomainNamesCollectionPage>`: The enterpriseNetworkDomainNames Gets and sets the enterpriseProxiedDomains Returns: :class:`EnterpriseProxiedDomainsCollectionPage<onedrivesdk.request.enterprise_proxied_domains_collection.EnterpriseProxiedDomainsCollectionPage>`: The enterpriseProxiedDomains Gets and sets the enterpriseIPRanges Returns: :class:`EnterpriseIPRangesCollectionPage<onedrivesdk.request.enterprise_ip_ranges_collection.EnterpriseIPRangesCollectionPage>`: The enterpriseIPRanges Gets and sets the enterpriseIPRangesAreAuthoritative Returns: bool: The enterpriseIPRangesAreAuthoritative Gets and sets the enterpriseProxyServers Returns: :class:`EnterpriseProxyServersCollectionPage<onedrivesdk.request.enterprise_proxy_servers_collection.EnterpriseProxyServersCollectionPage>`: The enterpriseProxyServers Gets and sets the enterpriseInternalProxyServers Returns: :class:`EnterpriseInternalProxyServersCollectionPage<onedrivesdk.request.enterprise_internal_proxy_servers_collection.EnterpriseInternalProxyServersCollectionPage>`: The enterpriseInternalProxyServers Gets and sets the enterpriseProxyServersAreAuthoritative Returns: bool: The enterpriseProxyServersAreAuthoritative Gets and sets the neutralDomainResources Returns: :class:`NeutralDomainResourcesCollectionPage<onedrivesdk.request.neutral_domain_resources_collection.NeutralDomainResourcesCollectionPage>`: The neutralDomainResources Gets and sets the indexingEncryptedStoresOrItemsBlocked Returns: bool: The indexingEncryptedStoresOrItemsBlocked Gets and sets the smbAutoEncryptedFileExtensions Returns: :class:`SmbAutoEncryptedFileExtensionsCollectionPage<onedrivesdk.request.smb_auto_encrypted_file_extensions_collection.SmbAutoEncryptedFileExtensionsCollectionPage>`: The smbAutoEncryptedFileExtensions Gets and sets the isAssigned Returns: bool: The isAssigned Gets and sets the protectedAppLockerFiles Returns: :class:`ProtectedAppLockerFilesCollectionPage<onedrivesdk.request.protected_app_locker_files_collection.ProtectedAppLockerFilesCollectionPage>`: The protectedAppLockerFiles Gets and sets the exemptAppLockerFiles Returns: :class:`ExemptAppLockerFilesCollectionPage<onedrivesdk.request.exempt_app_locker_files_collection.ExemptAppLockerFilesCollectionPage>`: The exemptAppLockerFiles Gets and sets the assignments Returns: :class:`AssignmentsCollectionPage<onedrivesdk.request.assignments_collection.AssignmentsCollectionPage>`: The assignments
1.815163
2
selenium_test.py
cmput401-fall2018/web-app-ci-cd-with-travis-ci-aabels
0
6617572
import unittest from selenium import webdriver from selenium.webdriver.common.keys import Keys class WebResumeSearch(unittest.TestCase): def setUp(self): self.browser = webdriver.Firefox() def test_home(self): browser = self.browser browser.get("http://192.168.127.12:8000/") contact = browser.find_element_by_id("contact") about = browser.find_element_by_id("about") education = browser.find_element_by_id("education") skills = browser.find_element_by_id("skills") work = browser.find_element_by_id("work") name = browser.find_element_by_id("name") assert browser.find_element_by_id("contact") != None assert browser.find_element_by_id("about") != None assert browser.find_element_by_id("education") != None assert browser.find_element_by_id("skills") != None assert browser.find_element_by_id("work") != None assert browser.find_element_by_id("name") != None def tearDown(self): self.browser.close() if __name__ == "__main__": unittest.main()
import unittest from selenium import webdriver from selenium.webdriver.common.keys import Keys class WebResumeSearch(unittest.TestCase): def setUp(self): self.browser = webdriver.Firefox() def test_home(self): browser = self.browser browser.get("http://192.168.127.12:8000/") contact = browser.find_element_by_id("contact") about = browser.find_element_by_id("about") education = browser.find_element_by_id("education") skills = browser.find_element_by_id("skills") work = browser.find_element_by_id("work") name = browser.find_element_by_id("name") assert browser.find_element_by_id("contact") != None assert browser.find_element_by_id("about") != None assert browser.find_element_by_id("education") != None assert browser.find_element_by_id("skills") != None assert browser.find_element_by_id("work") != None assert browser.find_element_by_id("name") != None def tearDown(self): self.browser.close() if __name__ == "__main__": unittest.main()
none
1
2.985028
3
pyfos/pyfos_brocade_license.py
madhavinaiduprathap/pyfosbrocade
0
6617573
# Copyright 2018 Brocade Communications Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may also obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :mod:`pyfos_brocade_license` - PyFOS module to provide rest support for\ Licenses installed on the specific switch in the fabric. **************************************************************************\ ************************************************************************** The :mod:`pyfos_brocade_license` provides the REST support for License. """ from pyfos import pyfos_rest_util from pyfos.pyfos_type import pyfos_type import pyfos.pyfos_version as version # pylint: disable=W0622 class license(pyfos_rest_util.rest_object): """Class of licenses installed on a switch Important class members: +---------------------------+------------------------------+----------------------------------------------------------+ | Attribute name | Description |Frequently used methods | +===========================+==============================+==========================================================+ | name | The license key for one or |:meth:`peek_name` | | | morefeatures installed on | | | | the switch | | +---------------------------+------------------------------+----------------------------------------------------------+ | features/feature | The list of features that |:meth:`peek_features_feature` | | | that are integrated in a | | | | single license key | | +---------------------------+------------------------------+----------------------------------------------------------+ | capacity | The capacity of the license |:meth:`peek_capacity` | | | installed on the switch.This | | | | field is displayed only for | | | | the capacity based license | | | | in the output. | | +---------------------------+------------------------------+----------------------------------------------------------+ | consumed | The number of consumed slots |:meth:`peek_consumed` | | | of the license installed | | | | on the switch. | | +---------------------------+------------------------------+----------------------------------------------------------+ | configured-blade-slots/ | The list of Configured Blade |:meth:`peek_configured_blade_slots_configured_blade_slot` | | configured-blade-slot | Slots of the specific license| | | | installed on the switch. | | +---------------------------+------------------------------+----------------------------------------------------------+ | expiration-date | The expiry date of the |:meth:`peek_expiration_date` | | | specific license installed | | | | on the system. | | | | the format of the date | | | | is 'MM/DD/YYYY'. | | +---------------------------+------------------------------+----------------------------------------------------------+ *Object methods* .. classmethod:: get(session) Returns a :class:`license` object filled with attributes gathered throuth the session passed in. Each object can be printed using :func:`pyfos_util.response_print` and individual attributes accessed through peek methods. :param session: session handler returned by :func:`utils.brcd_util.getsession` :rtype: a :class:`license` object *Attribute methods* .. method:: peek_name() Reads the license key string from the license Object :rtype: None on error and value on success .. method:: peek_features_feature() Reads the list of features from the license Object :rtype: None on error and value on success .. method:: peek_capacity() Reads the capacity from the license Object :rtype: None on error and value on success .. method:: peek_consumed() Reads the consumed slots from the license Object :rtype: None on error and value on success .. method:: peek_configured_blade_slots_configured_blade_slot() Reads the configured blade slots from the license Object :rtype: None on error and value on success .. method:: peek_expiration_date() Reads the expiration date from the license Object :rtype: None on error and value on success """ def __init__(self, dictvalues={}): super().__init__(pyfos_rest_util.rest_obj_type.license, "/rest/running/brocade-license/license", version.VER_RANGE_821b_and_ABOVE) self.add(pyfos_rest_util.rest_attribute("name", pyfos_type.type_str, None, pyfos_rest_util.REST_ATTRIBUTE_KEY)) self.add(pyfos_rest_util.rest_attribute("features", pyfos_type.type_na, dict(), pyfos_rest_util.REST_ATTRIBUTE_CONTAINER)) self.add(pyfos_rest_util.rest_attribute("feature", pyfos_type.type_na, None, pyfos_rest_util.REST_ATTRIBUTE_LEAF_LIST), ["features"]) self.add(pyfos_rest_util.rest_attribute("capacity", pyfos_type.type_int, None, pyfos_rest_util.REST_ATTRIBUTE_NOT_CONFIG)) self.add(pyfos_rest_util.rest_attribute("consumed", pyfos_type.type_int, None, pyfos_rest_util.REST_ATTRIBUTE_NOT_CONFIG)) self.add(pyfos_rest_util.rest_attribute("configured-blade-slots", pyfos_type.type_na, dict(), pyfos_rest_util.REST_ATTRIBUTE_CONTAINER)) self.add(pyfos_rest_util.rest_attribute("configured-blade-slot", pyfos_type.type_na, None, pyfos_rest_util.REST_ATTRIBUTE_LEAF_LIST), ["configured-blade-slots"]) self.add(pyfos_rest_util.rest_attribute("expiration-date", pyfos_type.type_str, None, pyfos_rest_util.REST_ATTRIBUTE_NOT_CONFIG)) self.load(dictvalues, 1)
# Copyright 2018 Brocade Communications Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may also obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :mod:`pyfos_brocade_license` - PyFOS module to provide rest support for\ Licenses installed on the specific switch in the fabric. **************************************************************************\ ************************************************************************** The :mod:`pyfos_brocade_license` provides the REST support for License. """ from pyfos import pyfos_rest_util from pyfos.pyfos_type import pyfos_type import pyfos.pyfos_version as version # pylint: disable=W0622 class license(pyfos_rest_util.rest_object): """Class of licenses installed on a switch Important class members: +---------------------------+------------------------------+----------------------------------------------------------+ | Attribute name | Description |Frequently used methods | +===========================+==============================+==========================================================+ | name | The license key for one or |:meth:`peek_name` | | | morefeatures installed on | | | | the switch | | +---------------------------+------------------------------+----------------------------------------------------------+ | features/feature | The list of features that |:meth:`peek_features_feature` | | | that are integrated in a | | | | single license key | | +---------------------------+------------------------------+----------------------------------------------------------+ | capacity | The capacity of the license |:meth:`peek_capacity` | | | installed on the switch.This | | | | field is displayed only for | | | | the capacity based license | | | | in the output. | | +---------------------------+------------------------------+----------------------------------------------------------+ | consumed | The number of consumed slots |:meth:`peek_consumed` | | | of the license installed | | | | on the switch. | | +---------------------------+------------------------------+----------------------------------------------------------+ | configured-blade-slots/ | The list of Configured Blade |:meth:`peek_configured_blade_slots_configured_blade_slot` | | configured-blade-slot | Slots of the specific license| | | | installed on the switch. | | +---------------------------+------------------------------+----------------------------------------------------------+ | expiration-date | The expiry date of the |:meth:`peek_expiration_date` | | | specific license installed | | | | on the system. | | | | the format of the date | | | | is 'MM/DD/YYYY'. | | +---------------------------+------------------------------+----------------------------------------------------------+ *Object methods* .. classmethod:: get(session) Returns a :class:`license` object filled with attributes gathered throuth the session passed in. Each object can be printed using :func:`pyfos_util.response_print` and individual attributes accessed through peek methods. :param session: session handler returned by :func:`utils.brcd_util.getsession` :rtype: a :class:`license` object *Attribute methods* .. method:: peek_name() Reads the license key string from the license Object :rtype: None on error and value on success .. method:: peek_features_feature() Reads the list of features from the license Object :rtype: None on error and value on success .. method:: peek_capacity() Reads the capacity from the license Object :rtype: None on error and value on success .. method:: peek_consumed() Reads the consumed slots from the license Object :rtype: None on error and value on success .. method:: peek_configured_blade_slots_configured_blade_slot() Reads the configured blade slots from the license Object :rtype: None on error and value on success .. method:: peek_expiration_date() Reads the expiration date from the license Object :rtype: None on error and value on success """ def __init__(self, dictvalues={}): super().__init__(pyfos_rest_util.rest_obj_type.license, "/rest/running/brocade-license/license", version.VER_RANGE_821b_and_ABOVE) self.add(pyfos_rest_util.rest_attribute("name", pyfos_type.type_str, None, pyfos_rest_util.REST_ATTRIBUTE_KEY)) self.add(pyfos_rest_util.rest_attribute("features", pyfos_type.type_na, dict(), pyfos_rest_util.REST_ATTRIBUTE_CONTAINER)) self.add(pyfos_rest_util.rest_attribute("feature", pyfos_type.type_na, None, pyfos_rest_util.REST_ATTRIBUTE_LEAF_LIST), ["features"]) self.add(pyfos_rest_util.rest_attribute("capacity", pyfos_type.type_int, None, pyfos_rest_util.REST_ATTRIBUTE_NOT_CONFIG)) self.add(pyfos_rest_util.rest_attribute("consumed", pyfos_type.type_int, None, pyfos_rest_util.REST_ATTRIBUTE_NOT_CONFIG)) self.add(pyfos_rest_util.rest_attribute("configured-blade-slots", pyfos_type.type_na, dict(), pyfos_rest_util.REST_ATTRIBUTE_CONTAINER)) self.add(pyfos_rest_util.rest_attribute("configured-blade-slot", pyfos_type.type_na, None, pyfos_rest_util.REST_ATTRIBUTE_LEAF_LIST), ["configured-blade-slots"]) self.add(pyfos_rest_util.rest_attribute("expiration-date", pyfos_type.type_str, None, pyfos_rest_util.REST_ATTRIBUTE_NOT_CONFIG)) self.load(dictvalues, 1)
en
0.629905
# Copyright 2018 Brocade Communications Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may also obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. :mod:`pyfos_brocade_license` - PyFOS module to provide rest support for\ Licenses installed on the specific switch in the fabric. **************************************************************************\ ************************************************************************** The :mod:`pyfos_brocade_license` provides the REST support for License. # pylint: disable=W0622 Class of licenses installed on a switch Important class members: +---------------------------+------------------------------+----------------------------------------------------------+ | Attribute name | Description |Frequently used methods | +===========================+==============================+==========================================================+ | name | The license key for one or |:meth:`peek_name` | | | morefeatures installed on | | | | the switch | | +---------------------------+------------------------------+----------------------------------------------------------+ | features/feature | The list of features that |:meth:`peek_features_feature` | | | that are integrated in a | | | | single license key | | +---------------------------+------------------------------+----------------------------------------------------------+ | capacity | The capacity of the license |:meth:`peek_capacity` | | | installed on the switch.This | | | | field is displayed only for | | | | the capacity based license | | | | in the output. | | +---------------------------+------------------------------+----------------------------------------------------------+ | consumed | The number of consumed slots |:meth:`peek_consumed` | | | of the license installed | | | | on the switch. | | +---------------------------+------------------------------+----------------------------------------------------------+ | configured-blade-slots/ | The list of Configured Blade |:meth:`peek_configured_blade_slots_configured_blade_slot` | | configured-blade-slot | Slots of the specific license| | | | installed on the switch. | | +---------------------------+------------------------------+----------------------------------------------------------+ | expiration-date | The expiry date of the |:meth:`peek_expiration_date` | | | specific license installed | | | | on the system. | | | | the format of the date | | | | is 'MM/DD/YYYY'. | | +---------------------------+------------------------------+----------------------------------------------------------+ *Object methods* .. classmethod:: get(session) Returns a :class:`license` object filled with attributes gathered throuth the session passed in. Each object can be printed using :func:`pyfos_util.response_print` and individual attributes accessed through peek methods. :param session: session handler returned by :func:`utils.brcd_util.getsession` :rtype: a :class:`license` object *Attribute methods* .. method:: peek_name() Reads the license key string from the license Object :rtype: None on error and value on success .. method:: peek_features_feature() Reads the list of features from the license Object :rtype: None on error and value on success .. method:: peek_capacity() Reads the capacity from the license Object :rtype: None on error and value on success .. method:: peek_consumed() Reads the consumed slots from the license Object :rtype: None on error and value on success .. method:: peek_configured_blade_slots_configured_blade_slot() Reads the configured blade slots from the license Object :rtype: None on error and value on success .. method:: peek_expiration_date() Reads the expiration date from the license Object :rtype: None on error and value on success
1.799788
2
gcs_client/credentials.py
Akrog/gcs-client
17
6617574
# -*- coding: utf-8 -*- # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import json from oauth2client import client as oauth2_client from gcs_client import constants from gcs_client import errors class Credentials(oauth2_client.SignedJwtAssertionCredentials): """GCS Credentials used to access servers.""" common_url = 'https://www.googleapis.com/auth/' scope_urls = { constants.SCOPE_READER: 'devstorage.read_only', constants.SCOPE_WRITER: 'devstorage.read_write', constants.SCOPE_OWNER: 'devstorage.full_control', constants.SCOPE_CLOUD: 'cloud-platform', } def __init__(self, key_file_name, email=None, scope=constants.SCOPE_OWNER): """Initialize credentials used for all GCS operations. Create OAuth 2.0 credentials to access GCS from a JSON file or a P12 and email address. Since this library is meant to work outside of Google App Engine and Google Compute Engine, you must obtain these credential files in the Google Developers Console. To generate service-account credentials, or to view the public credentials that you've already generated, do the following: 1. Open the Credentials page. 2. To set up a new service account, do the following: a. Click Add credentials > Service account. b. Choose whether to download the service account's public/private key as a JSON file (preferred) or standard P12 file. Your new public/private key pair is generated and downloaded to your machine; it serves as the only copy of this key. You are responsible for storing it securely. You can return to the Developers Console at any time to view the client ID, email address, and public key fingerprints, or to generate additional public/private key pairs. For more details about service account credentials in the Developers Console, see Service accounts in the Developers Console help file. :param key_file_name: Name of the file with the credentials to use. :type key_file_name: String :param email: Service account's Email address to use with P12 file. When using JSON files this argument will be ignored. :type email: String :param scope: Scopes that the credentials should be granted access to. Value must be one of Credentials.scope_urls.keys() :type scope: String """ if scope not in self.scope_urls: raise errors.Credentials('scope must be one of %s' % self.scope_urls.keys()) self.scope = scope try: with open(key_file_name, 'r') as f: key_data = f.read() except IOError: raise errors.Credentials( 'Could not read data from private key file %s.', key_file_name) try: json_data = json.loads(key_data) key_data = json_data['private_key'] email = json_data['client_email'] except Exception: if not email: raise errors.Credentials( 'Non JSON private key needs email, but it was missing') url = self.common_url + self.scope_urls[scope] super(Credentials, self).__init__(email, key_data, url) @property def authorization(self): """Authorization header value for GCS requests.""" if not self.access_token or self.access_token_expired: self.get_access_token() return 'Bearer ' + self.access_token
# -*- coding: utf-8 -*- # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import json from oauth2client import client as oauth2_client from gcs_client import constants from gcs_client import errors class Credentials(oauth2_client.SignedJwtAssertionCredentials): """GCS Credentials used to access servers.""" common_url = 'https://www.googleapis.com/auth/' scope_urls = { constants.SCOPE_READER: 'devstorage.read_only', constants.SCOPE_WRITER: 'devstorage.read_write', constants.SCOPE_OWNER: 'devstorage.full_control', constants.SCOPE_CLOUD: 'cloud-platform', } def __init__(self, key_file_name, email=None, scope=constants.SCOPE_OWNER): """Initialize credentials used for all GCS operations. Create OAuth 2.0 credentials to access GCS from a JSON file or a P12 and email address. Since this library is meant to work outside of Google App Engine and Google Compute Engine, you must obtain these credential files in the Google Developers Console. To generate service-account credentials, or to view the public credentials that you've already generated, do the following: 1. Open the Credentials page. 2. To set up a new service account, do the following: a. Click Add credentials > Service account. b. Choose whether to download the service account's public/private key as a JSON file (preferred) or standard P12 file. Your new public/private key pair is generated and downloaded to your machine; it serves as the only copy of this key. You are responsible for storing it securely. You can return to the Developers Console at any time to view the client ID, email address, and public key fingerprints, or to generate additional public/private key pairs. For more details about service account credentials in the Developers Console, see Service accounts in the Developers Console help file. :param key_file_name: Name of the file with the credentials to use. :type key_file_name: String :param email: Service account's Email address to use with P12 file. When using JSON files this argument will be ignored. :type email: String :param scope: Scopes that the credentials should be granted access to. Value must be one of Credentials.scope_urls.keys() :type scope: String """ if scope not in self.scope_urls: raise errors.Credentials('scope must be one of %s' % self.scope_urls.keys()) self.scope = scope try: with open(key_file_name, 'r') as f: key_data = f.read() except IOError: raise errors.Credentials( 'Could not read data from private key file %s.', key_file_name) try: json_data = json.loads(key_data) key_data = json_data['private_key'] email = json_data['client_email'] except Exception: if not email: raise errors.Credentials( 'Non JSON private key needs email, but it was missing') url = self.common_url + self.scope_urls[scope] super(Credentials, self).__init__(email, key_data, url) @property def authorization(self): """Authorization header value for GCS requests.""" if not self.access_token or self.access_token_expired: self.get_access_token() return 'Bearer ' + self.access_token
en
0.846862
# -*- coding: utf-8 -*- # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. GCS Credentials used to access servers. Initialize credentials used for all GCS operations. Create OAuth 2.0 credentials to access GCS from a JSON file or a P12 and email address. Since this library is meant to work outside of Google App Engine and Google Compute Engine, you must obtain these credential files in the Google Developers Console. To generate service-account credentials, or to view the public credentials that you've already generated, do the following: 1. Open the Credentials page. 2. To set up a new service account, do the following: a. Click Add credentials > Service account. b. Choose whether to download the service account's public/private key as a JSON file (preferred) or standard P12 file. Your new public/private key pair is generated and downloaded to your machine; it serves as the only copy of this key. You are responsible for storing it securely. You can return to the Developers Console at any time to view the client ID, email address, and public key fingerprints, or to generate additional public/private key pairs. For more details about service account credentials in the Developers Console, see Service accounts in the Developers Console help file. :param key_file_name: Name of the file with the credentials to use. :type key_file_name: String :param email: Service account's Email address to use with P12 file. When using JSON files this argument will be ignored. :type email: String :param scope: Scopes that the credentials should be granted access to. Value must be one of Credentials.scope_urls.keys() :type scope: String Authorization header value for GCS requests.
2.037139
2
Lang/Python/site-package/PyQt5/0001_PyQt5_Widget3.py
Orig5826/Basics
5
6617575
<filename>Lang/Python/site-package/PyQt5/0001_PyQt5_Widget3.py import sys from PyQt5.QtWidgets import (QApplication, QWidget, QFrame, QGridLayout, QProgressBar, QPushButton, QCalendarWidget, QLabel) from PyQt5.QtGui import QIcon, QFont, QColor from PyQt5.QtCore import Qt, QBasicTimer, QDate """ 进度条的应用 感谢:Archi - 高手成长之路 参考来源:https://www.cnblogs.com/archisama/p/5465104.html """ class Windows(QWidget): def __init__(self): super().__init__() self.initUI() self.finished = False def initUI(self): self.setWindowTitle('进度条示例') self.setWindowIcon(QIcon('./res/apaki.ico')) self.setGeometry(400, 300, 400, 300) self.pbar = QProgressBar(self) self.btn = QPushButton("开始下载", self) self.btn.clicked.connect(self.doAction) self.timer = QBasicTimer() self.step = 0 # ------------------------------------------------- cal = QCalendarWidget(self) cal.clicked[QDate].connect(self.showData) date = cal.selectedDate() self.label = QLabel(self) self.label.setText(date.toString()) grid = QGridLayout(self) grid.addWidget(self.pbar, 0, 1) grid.addWidget(self.btn, 0, 0) grid.addWidget(self.label, 2, 0, 1, 2) grid.addWidget(cal, 3, 0, 1, 2) self.setLayout(grid) def timerEvent(self, event): if self.step >= 100: self.timer.stop() self.btn.setText('下载完成') self.finished = True return self.step += 1 self.pbar.setValue(self.step) def doAction(self): if self.finished is not True: if self.timer.isActive(): self.timer.stop() self.btn.setText('继续') else: self.timer.start(100, self) self.btn.setText('停止') def showData(self, date): self.label.setText(date.toString()) if __name__ == "__main__": app = QApplication(sys.argv) w = Windows() w.show() sys.exit(app.exec_())
<filename>Lang/Python/site-package/PyQt5/0001_PyQt5_Widget3.py import sys from PyQt5.QtWidgets import (QApplication, QWidget, QFrame, QGridLayout, QProgressBar, QPushButton, QCalendarWidget, QLabel) from PyQt5.QtGui import QIcon, QFont, QColor from PyQt5.QtCore import Qt, QBasicTimer, QDate """ 进度条的应用 感谢:Archi - 高手成长之路 参考来源:https://www.cnblogs.com/archisama/p/5465104.html """ class Windows(QWidget): def __init__(self): super().__init__() self.initUI() self.finished = False def initUI(self): self.setWindowTitle('进度条示例') self.setWindowIcon(QIcon('./res/apaki.ico')) self.setGeometry(400, 300, 400, 300) self.pbar = QProgressBar(self) self.btn = QPushButton("开始下载", self) self.btn.clicked.connect(self.doAction) self.timer = QBasicTimer() self.step = 0 # ------------------------------------------------- cal = QCalendarWidget(self) cal.clicked[QDate].connect(self.showData) date = cal.selectedDate() self.label = QLabel(self) self.label.setText(date.toString()) grid = QGridLayout(self) grid.addWidget(self.pbar, 0, 1) grid.addWidget(self.btn, 0, 0) grid.addWidget(self.label, 2, 0, 1, 2) grid.addWidget(cal, 3, 0, 1, 2) self.setLayout(grid) def timerEvent(self, event): if self.step >= 100: self.timer.stop() self.btn.setText('下载完成') self.finished = True return self.step += 1 self.pbar.setValue(self.step) def doAction(self): if self.finished is not True: if self.timer.isActive(): self.timer.stop() self.btn.setText('继续') else: self.timer.start(100, self) self.btn.setText('停止') def showData(self, date): self.label.setText(date.toString()) if __name__ == "__main__": app = QApplication(sys.argv) w = Windows() w.show() sys.exit(app.exec_())
zh
0.388906
进度条的应用 感谢:Archi - 高手成长之路 参考来源:https://www.cnblogs.com/archisama/p/5465104.html # -------------------------------------------------
2.401448
2
create_2d_bbox_room_api.py
Synthesis-AI-Dev/synthesis-bbox-room-api
1
6617576
import argparse import itertools import json import concurrent.futures import random from tqdm import tqdm from pathlib import Path import cv2 import numpy as np # Input SUFFIX_RGB = '.rgb.png' # RGB image. Used to visualize the bounding boxes SUFFIX_SEGID = '.segments.png' SUFFIX_INFO = '.info.json' JSON_OBJ_ID_KEY = 'mask_id' # In json, this param contains the ID of the object in the mask JSON_OBJ_LIST_KEY = 'objects' # In json, this param contains list of the object JSON_HAZARD_IDENTIFIER_KEY = 'position' # Only hazards will have this param # Output SUFFIX_BBOX = '.bbox.json' SUFFIX_VIZ = '.bbox.jpg' # Viz of bboxes on RGB image SEED_RANDOM = 0 def main(args): """Creates 2D Bounding Box for each object in an image from renders of Room API The info.json files in the Room API contain 3D bounding boxes for each object. Objects do not include the floor and walls. They can be shoes, belts, etc. We project the 3D bounding boxes to 2D bounding boxes in the image plane using the camera intrinsics and extrinsics and optionally visualize the boxes. """ dir_src = Path(args.src_dir) if not dir_src.exists() or not dir_src.is_dir(): raise ValueError(f"The given directory was not found: {dir_src}") if args.dst_dir: dir_dst = Path(args.dst_dir) if not dir_dst.exists(): print(f"Creating output directory: {dir_dst}") dir_dst.mkdir(parents=True) else: dir_dst = dir_src list_infos = list(sorted(dir_src.glob('*' + SUFFIX_INFO))) list_segids = list(sorted(dir_src.glob('*' + SUFFIX_SEGID))) num_infos = len(list_infos) num_segids = len(list_segids) if num_infos < 1: raise ValueError(f"No {SUFFIX_INFO} files found in dir: {dir_src}") if num_segids < 1: raise ValueError(f"No {SUFFIX_SEGID} files found in dir: {dir_src}") if num_segids != num_infos: print(f"Error: The number of segmentindex files ({num_segids}) does not match" f" the number of info files ({num_infos}).\n Calculating mismatching files...") img_nums_segid = [_img.name[:-len(SUFFIX_SEGID)] for _img in list_segids] img_nums_info = [_img.name[:-len(SUFFIX_INFO)] for _img in list_infos] raise ValueError(f"Mismatch in number of segmentindex and info files. These are the mismatching img numbers:\n" f"{list(set(img_nums_segid) ^ set(img_nums_info))}") print(f'Creating bounding boxes: ({SUFFIX_BBOX})\n' f' Num Info files found ({SUFFIX_INFO}): {num_infos}\n' f' Num Segments files found ({SUFFIX_SEGID}): {num_segids}\n' f' Output Dir: {dir_dst}\n') if args.debug_viz_bbox_mask: create_viz = True list_rgb = list(sorted(dir_src.glob('*' + SUFFIX_RGB))) num_rgb = len(list_rgb) if num_rgb != num_infos: print(f"Error: The number of RGB files ({num_rgb}) does not match" f" the number of info files ({num_infos}).") img_nums_rgb = [_img.name[:-len(SUFFIX_RGB)] for _img in list_rgb] img_nums_info = [_img.name[:-len(SUFFIX_INFO)] for _img in list_infos] raise ValueError( f"Mismatch in number of rgb and info files. These are the mismatching img numbers:\n" f"{list(set(img_nums_rgb) ^ set(img_nums_info))}") print(f'Creating bounding boxes visualizations ({SUFFIX_VIZ}).\n' f' Num RGB files found ({SUFFIX_RGB}): {num_rgb}\n' f' Output Dir: {dir_dst}\n' f' WARNING: Creating visualizations can be slow\n') else: create_viz = False list_rgb = itertools.repeat(None) if args.workers > 0: max_workers = args.workers else: max_workers = None with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor: with tqdm(total=len(list_segids)) as pbar: for _ in executor.map(export_bbox_json, list_segids, list_infos, itertools.repeat(dir_dst), itertools.repeat(create_viz), list_rgb): # Catch any error raised in processes pbar.update() def export_bbox_json(file_segid, file_info_json, dst_dir, create_viz=False, file_rgb=None): """Export a json file containing info about bounding box of each object in a container from a given segmentindex and info.json file The segmentindex and info.json file should have the same image number. The exported json with bounding box info will also have the same image number. Args: file_segid (str or pathlib.Path): Path to png file containing masks of all objects file_info_json (str or pathlib.Path): Path to a info.json file containing info camera intrinsics+extrinsics as well as of all objects in the scene. dst_dir (str or pathlib.Path): The directory where json files with the 2D bboxes will be exported create_viz (Optional, bool): If given, will create a visualization of the mask and bounding box of EVERY valid object in EVERY image. Warning: This is very slow. file_rgb (Optional, str or pathlib.Path): If create_viz is true, the RGB file is used to visualize the bounding boxes. """ # Check inputs file_segid = Path(file_segid) file_info_json = Path(file_info_json) dir_dst = Path(dst_dir) img_num_segid = file_segid.name[:-len(SUFFIX_SEGID)] img_num_info = file_info_json.name[:-len(SUFFIX_INFO)] if img_num_segid != img_num_info: raise ValueError(f"The image number of segid file ({img_num_segid}) and info file ({img_num_info})" f"are different: {file_segid} {file_info_json}") if not dir_dst.exists(): print(f"Creating output dir: {dir_dst}") dir_dst.mkdir(parents=True) # Read input data segid = cv2.imread(str(file_segid), cv2.IMREAD_UNCHANGED | cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH) with open(file_info_json) as json_file: info = json.load(json_file) if create_viz: if file_rgb is None: raise ValueError('No RGB image given. For visuzalizing bboxes, RGB image should be passed.') img_rgb = cv2.imread(str(file_rgb)) new_obj_dict = {JSON_OBJ_LIST_KEY: []} # Stores info that will be output random.seed(SEED_RANDOM) # Process all objects in the scene for obj in info[JSON_OBJ_LIST_KEY]: # Static parts of the scene like walls and floor are also among the list of objects in the json. # However, should ignore them. We filter them out based on the fact that they do not have # parameters such as 'position'. if JSON_HAZARD_IDENTIFIER_KEY not in obj: continue # Get ID of the object in the mask obj_id = obj[JSON_OBJ_ID_KEY] # Get mask of the object mask_obj = (segid == obj_id).astype(np.uint8) # Convert bool to int if np.count_nonzero(mask_obj) == 0: # If mask is empty, skip this object continue # Get bounding box from mask maskx = np.any(mask_obj, axis=0) masky = np.any(mask_obj, axis=1) x1 = np.argmax(maskx) y1 = np.argmax(masky) x2 = len(maskx) - np.argmax(maskx[::-1]) y2 = len(masky) - np.argmax(masky[::-1]) bbox_obj = {"bounding-box-2d": {"x_min": int(x1), "x_max": int(x2), "y_min": int(y1), "y_max": int(y2)}} obj.update(bbox_obj) new_obj_dict[JSON_OBJ_LIST_KEY].append(obj) # Debug Visualizations - Export an image of mask and bounding box for every object in an image if create_viz: # Draw bounding box rand_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) cv2.rectangle(img_rgb, (x1, y1), (x2, y2), rand_color, 2) filename = dir_dst.joinpath(img_num_info + SUFFIX_BBOX) # print(f"Saving bbox json: {filename}") with open(filename, 'w') as outfile: json.dump(new_obj_dict, outfile, indent=4) if create_viz: filename = dir_dst.joinpath(img_num_info + SUFFIX_VIZ) # print(f"Saving bbox viz: {filename}") cv2.imwrite(str(filename), img_rgb) import time time.sleep(1) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Create 2D Bounding Box for each object in an image from renders" " of Room API") parser.add_argument("-s", "--src_dir", help="Dir where container segmentindex.png and info.json files are stored", required=True) parser.add_argument("-d", "--dst_dir", help="Optional. Dir where json files of bounding boxes should be saved", required=False, default=None) parser.add_argument("--debug_viz_bbox_mask", help="Visualize the mask and bounding box of EVERY object in EVERY " "image. Warning: This is very slow.", required=False, action="store_true") parser.add_argument("-w", "--workers", help="Number of processes to use. " "Defaults to the number of processors on the machine.", default=0, type=int) args = parser.parse_args() main(args)
import argparse import itertools import json import concurrent.futures import random from tqdm import tqdm from pathlib import Path import cv2 import numpy as np # Input SUFFIX_RGB = '.rgb.png' # RGB image. Used to visualize the bounding boxes SUFFIX_SEGID = '.segments.png' SUFFIX_INFO = '.info.json' JSON_OBJ_ID_KEY = 'mask_id' # In json, this param contains the ID of the object in the mask JSON_OBJ_LIST_KEY = 'objects' # In json, this param contains list of the object JSON_HAZARD_IDENTIFIER_KEY = 'position' # Only hazards will have this param # Output SUFFIX_BBOX = '.bbox.json' SUFFIX_VIZ = '.bbox.jpg' # Viz of bboxes on RGB image SEED_RANDOM = 0 def main(args): """Creates 2D Bounding Box for each object in an image from renders of Room API The info.json files in the Room API contain 3D bounding boxes for each object. Objects do not include the floor and walls. They can be shoes, belts, etc. We project the 3D bounding boxes to 2D bounding boxes in the image plane using the camera intrinsics and extrinsics and optionally visualize the boxes. """ dir_src = Path(args.src_dir) if not dir_src.exists() or not dir_src.is_dir(): raise ValueError(f"The given directory was not found: {dir_src}") if args.dst_dir: dir_dst = Path(args.dst_dir) if not dir_dst.exists(): print(f"Creating output directory: {dir_dst}") dir_dst.mkdir(parents=True) else: dir_dst = dir_src list_infos = list(sorted(dir_src.glob('*' + SUFFIX_INFO))) list_segids = list(sorted(dir_src.glob('*' + SUFFIX_SEGID))) num_infos = len(list_infos) num_segids = len(list_segids) if num_infos < 1: raise ValueError(f"No {SUFFIX_INFO} files found in dir: {dir_src}") if num_segids < 1: raise ValueError(f"No {SUFFIX_SEGID} files found in dir: {dir_src}") if num_segids != num_infos: print(f"Error: The number of segmentindex files ({num_segids}) does not match" f" the number of info files ({num_infos}).\n Calculating mismatching files...") img_nums_segid = [_img.name[:-len(SUFFIX_SEGID)] for _img in list_segids] img_nums_info = [_img.name[:-len(SUFFIX_INFO)] for _img in list_infos] raise ValueError(f"Mismatch in number of segmentindex and info files. These are the mismatching img numbers:\n" f"{list(set(img_nums_segid) ^ set(img_nums_info))}") print(f'Creating bounding boxes: ({SUFFIX_BBOX})\n' f' Num Info files found ({SUFFIX_INFO}): {num_infos}\n' f' Num Segments files found ({SUFFIX_SEGID}): {num_segids}\n' f' Output Dir: {dir_dst}\n') if args.debug_viz_bbox_mask: create_viz = True list_rgb = list(sorted(dir_src.glob('*' + SUFFIX_RGB))) num_rgb = len(list_rgb) if num_rgb != num_infos: print(f"Error: The number of RGB files ({num_rgb}) does not match" f" the number of info files ({num_infos}).") img_nums_rgb = [_img.name[:-len(SUFFIX_RGB)] for _img in list_rgb] img_nums_info = [_img.name[:-len(SUFFIX_INFO)] for _img in list_infos] raise ValueError( f"Mismatch in number of rgb and info files. These are the mismatching img numbers:\n" f"{list(set(img_nums_rgb) ^ set(img_nums_info))}") print(f'Creating bounding boxes visualizations ({SUFFIX_VIZ}).\n' f' Num RGB files found ({SUFFIX_RGB}): {num_rgb}\n' f' Output Dir: {dir_dst}\n' f' WARNING: Creating visualizations can be slow\n') else: create_viz = False list_rgb = itertools.repeat(None) if args.workers > 0: max_workers = args.workers else: max_workers = None with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor: with tqdm(total=len(list_segids)) as pbar: for _ in executor.map(export_bbox_json, list_segids, list_infos, itertools.repeat(dir_dst), itertools.repeat(create_viz), list_rgb): # Catch any error raised in processes pbar.update() def export_bbox_json(file_segid, file_info_json, dst_dir, create_viz=False, file_rgb=None): """Export a json file containing info about bounding box of each object in a container from a given segmentindex and info.json file The segmentindex and info.json file should have the same image number. The exported json with bounding box info will also have the same image number. Args: file_segid (str or pathlib.Path): Path to png file containing masks of all objects file_info_json (str or pathlib.Path): Path to a info.json file containing info camera intrinsics+extrinsics as well as of all objects in the scene. dst_dir (str or pathlib.Path): The directory where json files with the 2D bboxes will be exported create_viz (Optional, bool): If given, will create a visualization of the mask and bounding box of EVERY valid object in EVERY image. Warning: This is very slow. file_rgb (Optional, str or pathlib.Path): If create_viz is true, the RGB file is used to visualize the bounding boxes. """ # Check inputs file_segid = Path(file_segid) file_info_json = Path(file_info_json) dir_dst = Path(dst_dir) img_num_segid = file_segid.name[:-len(SUFFIX_SEGID)] img_num_info = file_info_json.name[:-len(SUFFIX_INFO)] if img_num_segid != img_num_info: raise ValueError(f"The image number of segid file ({img_num_segid}) and info file ({img_num_info})" f"are different: {file_segid} {file_info_json}") if not dir_dst.exists(): print(f"Creating output dir: {dir_dst}") dir_dst.mkdir(parents=True) # Read input data segid = cv2.imread(str(file_segid), cv2.IMREAD_UNCHANGED | cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH) with open(file_info_json) as json_file: info = json.load(json_file) if create_viz: if file_rgb is None: raise ValueError('No RGB image given. For visuzalizing bboxes, RGB image should be passed.') img_rgb = cv2.imread(str(file_rgb)) new_obj_dict = {JSON_OBJ_LIST_KEY: []} # Stores info that will be output random.seed(SEED_RANDOM) # Process all objects in the scene for obj in info[JSON_OBJ_LIST_KEY]: # Static parts of the scene like walls and floor are also among the list of objects in the json. # However, should ignore them. We filter them out based on the fact that they do not have # parameters such as 'position'. if JSON_HAZARD_IDENTIFIER_KEY not in obj: continue # Get ID of the object in the mask obj_id = obj[JSON_OBJ_ID_KEY] # Get mask of the object mask_obj = (segid == obj_id).astype(np.uint8) # Convert bool to int if np.count_nonzero(mask_obj) == 0: # If mask is empty, skip this object continue # Get bounding box from mask maskx = np.any(mask_obj, axis=0) masky = np.any(mask_obj, axis=1) x1 = np.argmax(maskx) y1 = np.argmax(masky) x2 = len(maskx) - np.argmax(maskx[::-1]) y2 = len(masky) - np.argmax(masky[::-1]) bbox_obj = {"bounding-box-2d": {"x_min": int(x1), "x_max": int(x2), "y_min": int(y1), "y_max": int(y2)}} obj.update(bbox_obj) new_obj_dict[JSON_OBJ_LIST_KEY].append(obj) # Debug Visualizations - Export an image of mask and bounding box for every object in an image if create_viz: # Draw bounding box rand_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) cv2.rectangle(img_rgb, (x1, y1), (x2, y2), rand_color, 2) filename = dir_dst.joinpath(img_num_info + SUFFIX_BBOX) # print(f"Saving bbox json: {filename}") with open(filename, 'w') as outfile: json.dump(new_obj_dict, outfile, indent=4) if create_viz: filename = dir_dst.joinpath(img_num_info + SUFFIX_VIZ) # print(f"Saving bbox viz: {filename}") cv2.imwrite(str(filename), img_rgb) import time time.sleep(1) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Create 2D Bounding Box for each object in an image from renders" " of Room API") parser.add_argument("-s", "--src_dir", help="Dir where container segmentindex.png and info.json files are stored", required=True) parser.add_argument("-d", "--dst_dir", help="Optional. Dir where json files of bounding boxes should be saved", required=False, default=None) parser.add_argument("--debug_viz_bbox_mask", help="Visualize the mask and bounding box of EVERY object in EVERY " "image. Warning: This is very slow.", required=False, action="store_true") parser.add_argument("-w", "--workers", help="Number of processes to use. " "Defaults to the number of processors on the machine.", default=0, type=int) args = parser.parse_args() main(args)
en
0.802859
# Input # RGB image. Used to visualize the bounding boxes # In json, this param contains the ID of the object in the mask # In json, this param contains list of the object # Only hazards will have this param # Output # Viz of bboxes on RGB image Creates 2D Bounding Box for each object in an image from renders of Room API The info.json files in the Room API contain 3D bounding boxes for each object. Objects do not include the floor and walls. They can be shoes, belts, etc. We project the 3D bounding boxes to 2D bounding boxes in the image plane using the camera intrinsics and extrinsics and optionally visualize the boxes. # Catch any error raised in processes Export a json file containing info about bounding box of each object in a container from a given segmentindex and info.json file The segmentindex and info.json file should have the same image number. The exported json with bounding box info will also have the same image number. Args: file_segid (str or pathlib.Path): Path to png file containing masks of all objects file_info_json (str or pathlib.Path): Path to a info.json file containing info camera intrinsics+extrinsics as well as of all objects in the scene. dst_dir (str or pathlib.Path): The directory where json files with the 2D bboxes will be exported create_viz (Optional, bool): If given, will create a visualization of the mask and bounding box of EVERY valid object in EVERY image. Warning: This is very slow. file_rgb (Optional, str or pathlib.Path): If create_viz is true, the RGB file is used to visualize the bounding boxes. # Check inputs # Read input data # Stores info that will be output # Process all objects in the scene # Static parts of the scene like walls and floor are also among the list of objects in the json. # However, should ignore them. We filter them out based on the fact that they do not have # parameters such as 'position'. # Get ID of the object in the mask # Get mask of the object # Convert bool to int # If mask is empty, skip this object # Get bounding box from mask # Debug Visualizations - Export an image of mask and bounding box for every object in an image # Draw bounding box # print(f"Saving bbox json: {filename}") # print(f"Saving bbox viz: {filename}")
2.607422
3
pucadmin/frontoffice/tests.py
JobDoesburg/PUC-admin
0
6617577
<filename>pucadmin/frontoffice/tests.py from django.contrib.auth import get_user_model from django.core.files.uploadedfile import SimpleUploadedFile from django.utils import timezone from django.test import Client, TestCase, RequestFactory from frontoffice import views from frontoffice.forms import ( QuestionStudentFormset, QuestionSubmissionForm, SubmissionForm, CompetitionStudentFormset, CompetitionSupervisorFormSet, ) from organisations.models import Course, Organisation from competitions.models import ( Submission, Competition, Student as CompetitionStudent, Supervisor as CompetitionSupervisor, ) from questions.models import Question, Student as QuestionStudent def _instantiate_formset(formset_class, data, instance=None, initial=None): prefix = formset_class().prefix formset_data = {} for i, form_data in enumerate(data): for name, value in form_data.items(): if isinstance(value, list): for j, inner in enumerate(value): formset_data["{}-{}-{}_{}".format(prefix, i, name, j)] = inner else: formset_data["{}-{}-{}".format(prefix, i, name)] = value formset_data["{}-TOTAL_FORMS".format(prefix)] = len(data) formset_data["{}-INITIAL_FORMS".format(prefix)] = 0 if instance: return formset_class(formset_data, instance=instance, initial=initial) else: return formset_class(formset_data, initial=initial) class QuestionFrontOfficeTest(TestCase): def setUp(self): self.organisation = Organisation.objects.create(name="PUC of Science") self.course = Course.objects.create( name="natuurkunde", slug="nat", organisation=self.organisation ) self.form_data = { "school_text": "Test college Nijmegen", "course": self.course, "research_question": "Lorem ipsum dolor sit amet", "sub_questions": "Test test test", "message": "Test test test", "expected_end_date": timezone.datetime(year=2022, month=1, day=1), "privacy_policy": 1, } self.formset_data = [ { "first_name": "Firstname1", "last_name": "Lastname1", "email": "<EMAIL>", }, { "first_name": "Firstname2", "last_name": "Lastname2", "email": "<EMAIL>", }, ] self.user = get_user_model().objects.create_user( username="test1", email="<EMAIL>" ) self.rf = RequestFactory() self.view = views.QuestionSubmissionView() self.client = Client() self.client.force_login(self.user) def test_privacy_policy_checked(self): with self.subTest("Form is valid"): form = QuestionSubmissionForm(self.form_data) self.assertTrue(form.is_valid(), msg=dict(form.errors)) with self.subTest("Form is not valid"): self.form_data["privacy_policy"] = 0 form = QuestionSubmissionForm(self.form_data) self.assertFalse(form.is_valid(), msg=dict(form.errors)) def test_formset(self): formset = _instantiate_formset(QuestionStudentFormset, self.formset_data) self.assertTrue(formset.is_valid()) def test_submit_form(self): self.form_data["course"] = self.course.id self.form_data["expected_end_date"] = "01-01-2022" formset = _instantiate_formset(QuestionStudentFormset, self.formset_data) data = {**self.form_data, **formset.data} response = self.client.post(f"/frontoffice/question/", data=data, follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(Question.objects.count(), 1) self.assertEqual(QuestionStudent.objects.count(), 2) question = Question.objects.first() student1 = QuestionStudent.objects.first() student2 = QuestionStudent.objects.last() self.assertEqual(question.school_text, self.form_data["school_text"]) self.assertEqual(question.course, self.course) self.assertEqual( question.research_question, self.form_data["research_question"] ) self.assertEqual(question.sub_questions, self.form_data["sub_questions"]) self.assertEqual(question.message, self.form_data["message"]) self.assertEqual( question.expected_end_date, timezone.datetime(year=2022, month=1, day=1).date(), ) self.assertEqual(student1.first_name, self.formset_data[0]["first_name"]) self.assertEqual(student1.last_name, self.formset_data[0]["last_name"]) self.assertEqual(student1.email, self.formset_data[0]["email"]) self.assertEqual(student1.question, question) self.assertEqual(student2.first_name, self.formset_data[1]["first_name"]) self.assertEqual(student2.last_name, self.formset_data[1]["last_name"]) self.assertEqual(student2.email, self.formset_data[1]["email"]) self.assertEqual(student2.question, question) class CompetitionFrontOfficeTest(TestCase): def setUp(self): self.organisation = Organisation.objects.create(name="PUC of Science") self.competition = Competition.objects.create( name="<NAME> 2022", organisation=self.organisation, registration_start=timezone.now() - timezone.timedelta(days=1), registration_end=timezone.now() + timezone.timedelta(days=1), ) self.course = Course.objects.create( name="natuurkunde", slug="nat", organisation=self.organisation ) self.test_file = SimpleUploadedFile( "test_document.pdf", b"\x00\x00\x00", content_type="application/pdf" ) self.form_data = { "title": "Test title", "competition": self.competition, "course": self.course, "abstract": "Lorem ipsum dolor sit amet", "school_text": "Test test", "privacy_policy": 1, } self.student_formset_data = [ { "first_name": "Firstname1", "last_name": "Lastname1", "address_1": "Address 11", "address_2": "Address 12", "zip": "1234 AB", "town": "Nijmegen", "phone": "76543210", "email": "<EMAIL>", }, { "first_name": "Firstname2", "last_name": "Lastname2", "address_1": "Address 12", "address_2": "Address 22", "zip": "4321 AB", "town": "Nijmegen", "phone": "01234567", "email": "<EMAIL>", }, ] self.supervisor_formset_data = [ { "first_name": "Firstname1", "last_name": "Lastname1", "phone": "76543210", "email": "<EMAIL>", "course": self.course, }, ] self.user = get_user_model().objects.create_user( username="test1", email="<EMAIL>" ) self.rf = RequestFactory() self.view = views.CompetitionSubmissionView() self.client = Client() self.client.force_login(self.user) def test_privacy_policy_checked(self): with self.subTest("Form is valid"): form = SubmissionForm(self.form_data, {"document": self.test_file}) self.assertTrue(form.is_valid(), msg=dict(form.errors)) with self.subTest("Form is not valid"): self.form_data["privacy_policy"] = 0 form = SubmissionForm(self.form_data, {"document": self.test_file}) self.assertFalse(form.is_valid(), msg=dict(form.errors)) def test_formset(self): student_formset = _instantiate_formset( CompetitionStudentFormset, self.student_formset_data ) self.assertTrue(student_formset.is_valid()) supervisor_formset = _instantiate_formset( CompetitionSupervisorFormSet, self.supervisor_formset_data ) self.assertTrue(supervisor_formset.is_valid()) def test_submit_form(self): self.form_data["course"] = self.course.id self.form_data["competition"] = self.competition.id self.supervisor_formset_data[0]["course"] = self.course.id student_formset = _instantiate_formset( CompetitionStudentFormset, self.student_formset_data ) supervisor_formset = _instantiate_formset( CompetitionSupervisorFormSet, self.supervisor_formset_data ) data = {**self.form_data, **student_formset.data, **supervisor_formset.data} response = self.client.post(f"/frontoffice/competition/", data, follow=True) # Test does not work because uploading a file does not work properly in test cases # self.assertEqual(response.status_code, 200) # self.assertEqual(Submission.objects.count(), 1) # self.assertEqual(CompetitionStudent.objects.count(), 2) # self.assertEqual(CompetitionSupervisor.objects.count(), 1) # submission = Submission.objects.first() # student1 = CompetitionStudent.objects.first() # student2 = CompetitionStudent.objects.last() # supervisor = CompetitionSupervisor.objects.first() # # self.assertEqual(submission.competition, self.competition) # self.assertEqual(submission.title, self.form_data["title"]) # self.assertEqual(submission.course, self.course) # self.assertEqual(submission.abstract, self.form_data["abstract"]) # self.assertEqual(submission.school_text, self.form_data["school_text"]) # self.assertEqual( # student1.first_name, self.student_formset_data[0]["first_name"] # ) # self.assertEqual(student1.last_name, self.student_formset_data[0]["last_name"]) # self.assertEqual(student1.address_1, self.student_formset_data[0]["address_1"]) # self.assertEqual(student1.address_2, self.student_formset_data[0]["address_2"]) # self.assertEqual(student1.zip, self.student_formset_data[0]["zip"]) # self.assertEqual(student1.town, self.student_formset_data[0]["town"]) # self.assertEqual(student1.phone, self.student_formset_data[0]["phone"]) # self.assertEqual(student1.email, self.student_formset_data[0]["email"]) # self.assertEqual(student1.submission, submission) # self.assertEqual( # student2.first_name, self.student_formset_data[1]["first_name"] # ) # self.assertEqual(student2.last_name, self.student_formset_data[1]["last_name"]) # self.assertEqual(student2.address_1, self.student_formset_data[1]["address_1"]) # self.assertEqual(student2.address_2, self.student_formset_data[1]["address_2"]) # self.assertEqual(student2.zip, self.student_formset_data[1]["zip"]) # self.assertEqual(student2.town, self.student_formset_data[1]["town"]) # self.assertEqual(student2.phone, self.student_formset_data[1]["phone"]) # self.assertEqual(student2.email, self.student_formset_data[1]["email"]) # self.assertEqual(student2.submission, submission) # self.assertEqual( # supervisor.first_name, self.supervisor_formset_data[0]["first_name"] # ) # self.assertEqual( # supervisor.last_name, self.supervisor_formset_data[0]["last_name"] # ) # self.assertEqual(supervisor.phone, self.supervisor_formset_data[0]["phone"]) # self.assertEqual(supervisor.email, self.supervisor_formset_data[0]["email"]) # self.assertEqual(supervisor.course, self.course) # self.assertEqual(supervisor.submission, submission)
<filename>pucadmin/frontoffice/tests.py from django.contrib.auth import get_user_model from django.core.files.uploadedfile import SimpleUploadedFile from django.utils import timezone from django.test import Client, TestCase, RequestFactory from frontoffice import views from frontoffice.forms import ( QuestionStudentFormset, QuestionSubmissionForm, SubmissionForm, CompetitionStudentFormset, CompetitionSupervisorFormSet, ) from organisations.models import Course, Organisation from competitions.models import ( Submission, Competition, Student as CompetitionStudent, Supervisor as CompetitionSupervisor, ) from questions.models import Question, Student as QuestionStudent def _instantiate_formset(formset_class, data, instance=None, initial=None): prefix = formset_class().prefix formset_data = {} for i, form_data in enumerate(data): for name, value in form_data.items(): if isinstance(value, list): for j, inner in enumerate(value): formset_data["{}-{}-{}_{}".format(prefix, i, name, j)] = inner else: formset_data["{}-{}-{}".format(prefix, i, name)] = value formset_data["{}-TOTAL_FORMS".format(prefix)] = len(data) formset_data["{}-INITIAL_FORMS".format(prefix)] = 0 if instance: return formset_class(formset_data, instance=instance, initial=initial) else: return formset_class(formset_data, initial=initial) class QuestionFrontOfficeTest(TestCase): def setUp(self): self.organisation = Organisation.objects.create(name="PUC of Science") self.course = Course.objects.create( name="natuurkunde", slug="nat", organisation=self.organisation ) self.form_data = { "school_text": "Test college Nijmegen", "course": self.course, "research_question": "Lorem ipsum dolor sit amet", "sub_questions": "Test test test", "message": "Test test test", "expected_end_date": timezone.datetime(year=2022, month=1, day=1), "privacy_policy": 1, } self.formset_data = [ { "first_name": "Firstname1", "last_name": "Lastname1", "email": "<EMAIL>", }, { "first_name": "Firstname2", "last_name": "Lastname2", "email": "<EMAIL>", }, ] self.user = get_user_model().objects.create_user( username="test1", email="<EMAIL>" ) self.rf = RequestFactory() self.view = views.QuestionSubmissionView() self.client = Client() self.client.force_login(self.user) def test_privacy_policy_checked(self): with self.subTest("Form is valid"): form = QuestionSubmissionForm(self.form_data) self.assertTrue(form.is_valid(), msg=dict(form.errors)) with self.subTest("Form is not valid"): self.form_data["privacy_policy"] = 0 form = QuestionSubmissionForm(self.form_data) self.assertFalse(form.is_valid(), msg=dict(form.errors)) def test_formset(self): formset = _instantiate_formset(QuestionStudentFormset, self.formset_data) self.assertTrue(formset.is_valid()) def test_submit_form(self): self.form_data["course"] = self.course.id self.form_data["expected_end_date"] = "01-01-2022" formset = _instantiate_formset(QuestionStudentFormset, self.formset_data) data = {**self.form_data, **formset.data} response = self.client.post(f"/frontoffice/question/", data=data, follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(Question.objects.count(), 1) self.assertEqual(QuestionStudent.objects.count(), 2) question = Question.objects.first() student1 = QuestionStudent.objects.first() student2 = QuestionStudent.objects.last() self.assertEqual(question.school_text, self.form_data["school_text"]) self.assertEqual(question.course, self.course) self.assertEqual( question.research_question, self.form_data["research_question"] ) self.assertEqual(question.sub_questions, self.form_data["sub_questions"]) self.assertEqual(question.message, self.form_data["message"]) self.assertEqual( question.expected_end_date, timezone.datetime(year=2022, month=1, day=1).date(), ) self.assertEqual(student1.first_name, self.formset_data[0]["first_name"]) self.assertEqual(student1.last_name, self.formset_data[0]["last_name"]) self.assertEqual(student1.email, self.formset_data[0]["email"]) self.assertEqual(student1.question, question) self.assertEqual(student2.first_name, self.formset_data[1]["first_name"]) self.assertEqual(student2.last_name, self.formset_data[1]["last_name"]) self.assertEqual(student2.email, self.formset_data[1]["email"]) self.assertEqual(student2.question, question) class CompetitionFrontOfficeTest(TestCase): def setUp(self): self.organisation = Organisation.objects.create(name="PUC of Science") self.competition = Competition.objects.create( name="<NAME> 2022", organisation=self.organisation, registration_start=timezone.now() - timezone.timedelta(days=1), registration_end=timezone.now() + timezone.timedelta(days=1), ) self.course = Course.objects.create( name="natuurkunde", slug="nat", organisation=self.organisation ) self.test_file = SimpleUploadedFile( "test_document.pdf", b"\x00\x00\x00", content_type="application/pdf" ) self.form_data = { "title": "Test title", "competition": self.competition, "course": self.course, "abstract": "Lorem ipsum dolor sit amet", "school_text": "Test test", "privacy_policy": 1, } self.student_formset_data = [ { "first_name": "Firstname1", "last_name": "Lastname1", "address_1": "Address 11", "address_2": "Address 12", "zip": "1234 AB", "town": "Nijmegen", "phone": "76543210", "email": "<EMAIL>", }, { "first_name": "Firstname2", "last_name": "Lastname2", "address_1": "Address 12", "address_2": "Address 22", "zip": "4321 AB", "town": "Nijmegen", "phone": "01234567", "email": "<EMAIL>", }, ] self.supervisor_formset_data = [ { "first_name": "Firstname1", "last_name": "Lastname1", "phone": "76543210", "email": "<EMAIL>", "course": self.course, }, ] self.user = get_user_model().objects.create_user( username="test1", email="<EMAIL>" ) self.rf = RequestFactory() self.view = views.CompetitionSubmissionView() self.client = Client() self.client.force_login(self.user) def test_privacy_policy_checked(self): with self.subTest("Form is valid"): form = SubmissionForm(self.form_data, {"document": self.test_file}) self.assertTrue(form.is_valid(), msg=dict(form.errors)) with self.subTest("Form is not valid"): self.form_data["privacy_policy"] = 0 form = SubmissionForm(self.form_data, {"document": self.test_file}) self.assertFalse(form.is_valid(), msg=dict(form.errors)) def test_formset(self): student_formset = _instantiate_formset( CompetitionStudentFormset, self.student_formset_data ) self.assertTrue(student_formset.is_valid()) supervisor_formset = _instantiate_formset( CompetitionSupervisorFormSet, self.supervisor_formset_data ) self.assertTrue(supervisor_formset.is_valid()) def test_submit_form(self): self.form_data["course"] = self.course.id self.form_data["competition"] = self.competition.id self.supervisor_formset_data[0]["course"] = self.course.id student_formset = _instantiate_formset( CompetitionStudentFormset, self.student_formset_data ) supervisor_formset = _instantiate_formset( CompetitionSupervisorFormSet, self.supervisor_formset_data ) data = {**self.form_data, **student_formset.data, **supervisor_formset.data} response = self.client.post(f"/frontoffice/competition/", data, follow=True) # Test does not work because uploading a file does not work properly in test cases # self.assertEqual(response.status_code, 200) # self.assertEqual(Submission.objects.count(), 1) # self.assertEqual(CompetitionStudent.objects.count(), 2) # self.assertEqual(CompetitionSupervisor.objects.count(), 1) # submission = Submission.objects.first() # student1 = CompetitionStudent.objects.first() # student2 = CompetitionStudent.objects.last() # supervisor = CompetitionSupervisor.objects.first() # # self.assertEqual(submission.competition, self.competition) # self.assertEqual(submission.title, self.form_data["title"]) # self.assertEqual(submission.course, self.course) # self.assertEqual(submission.abstract, self.form_data["abstract"]) # self.assertEqual(submission.school_text, self.form_data["school_text"]) # self.assertEqual( # student1.first_name, self.student_formset_data[0]["first_name"] # ) # self.assertEqual(student1.last_name, self.student_formset_data[0]["last_name"]) # self.assertEqual(student1.address_1, self.student_formset_data[0]["address_1"]) # self.assertEqual(student1.address_2, self.student_formset_data[0]["address_2"]) # self.assertEqual(student1.zip, self.student_formset_data[0]["zip"]) # self.assertEqual(student1.town, self.student_formset_data[0]["town"]) # self.assertEqual(student1.phone, self.student_formset_data[0]["phone"]) # self.assertEqual(student1.email, self.student_formset_data[0]["email"]) # self.assertEqual(student1.submission, submission) # self.assertEqual( # student2.first_name, self.student_formset_data[1]["first_name"] # ) # self.assertEqual(student2.last_name, self.student_formset_data[1]["last_name"]) # self.assertEqual(student2.address_1, self.student_formset_data[1]["address_1"]) # self.assertEqual(student2.address_2, self.student_formset_data[1]["address_2"]) # self.assertEqual(student2.zip, self.student_formset_data[1]["zip"]) # self.assertEqual(student2.town, self.student_formset_data[1]["town"]) # self.assertEqual(student2.phone, self.student_formset_data[1]["phone"]) # self.assertEqual(student2.email, self.student_formset_data[1]["email"]) # self.assertEqual(student2.submission, submission) # self.assertEqual( # supervisor.first_name, self.supervisor_formset_data[0]["first_name"] # ) # self.assertEqual( # supervisor.last_name, self.supervisor_formset_data[0]["last_name"] # ) # self.assertEqual(supervisor.phone, self.supervisor_formset_data[0]["phone"]) # self.assertEqual(supervisor.email, self.supervisor_formset_data[0]["email"]) # self.assertEqual(supervisor.course, self.course) # self.assertEqual(supervisor.submission, submission)
en
0.530778
# Test does not work because uploading a file does not work properly in test cases # self.assertEqual(response.status_code, 200) # self.assertEqual(Submission.objects.count(), 1) # self.assertEqual(CompetitionStudent.objects.count(), 2) # self.assertEqual(CompetitionSupervisor.objects.count(), 1) # submission = Submission.objects.first() # student1 = CompetitionStudent.objects.first() # student2 = CompetitionStudent.objects.last() # supervisor = CompetitionSupervisor.objects.first() # # self.assertEqual(submission.competition, self.competition) # self.assertEqual(submission.title, self.form_data["title"]) # self.assertEqual(submission.course, self.course) # self.assertEqual(submission.abstract, self.form_data["abstract"]) # self.assertEqual(submission.school_text, self.form_data["school_text"]) # self.assertEqual( # student1.first_name, self.student_formset_data[0]["first_name"] # ) # self.assertEqual(student1.last_name, self.student_formset_data[0]["last_name"]) # self.assertEqual(student1.address_1, self.student_formset_data[0]["address_1"]) # self.assertEqual(student1.address_2, self.student_formset_data[0]["address_2"]) # self.assertEqual(student1.zip, self.student_formset_data[0]["zip"]) # self.assertEqual(student1.town, self.student_formset_data[0]["town"]) # self.assertEqual(student1.phone, self.student_formset_data[0]["phone"]) # self.assertEqual(student1.email, self.student_formset_data[0]["email"]) # self.assertEqual(student1.submission, submission) # self.assertEqual( # student2.first_name, self.student_formset_data[1]["first_name"] # ) # self.assertEqual(student2.last_name, self.student_formset_data[1]["last_name"]) # self.assertEqual(student2.address_1, self.student_formset_data[1]["address_1"]) # self.assertEqual(student2.address_2, self.student_formset_data[1]["address_2"]) # self.assertEqual(student2.zip, self.student_formset_data[1]["zip"]) # self.assertEqual(student2.town, self.student_formset_data[1]["town"]) # self.assertEqual(student2.phone, self.student_formset_data[1]["phone"]) # self.assertEqual(student2.email, self.student_formset_data[1]["email"]) # self.assertEqual(student2.submission, submission) # self.assertEqual( # supervisor.first_name, self.supervisor_formset_data[0]["first_name"] # ) # self.assertEqual( # supervisor.last_name, self.supervisor_formset_data[0]["last_name"] # ) # self.assertEqual(supervisor.phone, self.supervisor_formset_data[0]["phone"]) # self.assertEqual(supervisor.email, self.supervisor_formset_data[0]["email"]) # self.assertEqual(supervisor.course, self.course) # self.assertEqual(supervisor.submission, submission)
2.252773
2
gs/profile/notify/__init__.py
groupserver/gs.profile.notify
0
6617578
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals #lint:disable from .notifyuser import NotifyUser from .sender import MessageSender #lint:enable from AccessControl import ModuleSecurityInfo from AccessControl import allow_class m_security = ModuleSecurityInfo('gs.profile.notify.notifyuser') m_security.declarePublic('NotifyUser') allow_class(NotifyUser)
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals #lint:disable from .notifyuser import NotifyUser from .sender import MessageSender #lint:enable from AccessControl import ModuleSecurityInfo from AccessControl import allow_class m_security = ModuleSecurityInfo('gs.profile.notify.notifyuser') m_security.declarePublic('NotifyUser') allow_class(NotifyUser)
en
0.245874
# -*- coding: utf-8 -*- #lint:disable #lint:enable
1.352401
1
TEP/atcoder245/A.py
GuilhermeBraz/unb-workflow
0
6617579
# One day, Takahashi got up at exactly B minutes past A o'clock (in 24-hour clock), and Aoki got up at exactly D minutes and 1 second past C o'clock. # If Takahashi got up earlier than Aoki, print Takahashi; otherwise, print Aoki. import datetime A, B, C, D = map(int, input().split()) takahashi = datetime.time(A, B) aoki = datetime.time(C, D, 1) print('Takahashi' if takahashi < aoki else 'Aoki')
# One day, Takahashi got up at exactly B minutes past A o'clock (in 24-hour clock), and Aoki got up at exactly D minutes and 1 second past C o'clock. # If Takahashi got up earlier than Aoki, print Takahashi; otherwise, print Aoki. import datetime A, B, C, D = map(int, input().split()) takahashi = datetime.time(A, B) aoki = datetime.time(C, D, 1) print('Takahashi' if takahashi < aoki else 'Aoki')
en
0.905143
# One day, Takahashi got up at exactly B minutes past A o'clock (in 24-hour clock), and Aoki got up at exactly D minutes and 1 second past C o'clock. # If Takahashi got up earlier than Aoki, print Takahashi; otherwise, print Aoki.
3.461946
3
venv/lib/python3.8/site-packages/validation/tests/test_uuid.py
julio9246/hg-poker-api
9
6617580
<gh_stars>1-10 import unittest import uuid from validation import validate_uuid class ValidateUUIDTestCase(unittest.TestCase): def test_uuid1_valid(self): validate_uuid(uuid.uuid1()) def test_uuid1_expected_valid(self): validate_uuid(uuid.uuid1(), version=1) def test_uuid1_expected_invalid(self): with self.assertRaises(ValueError): validate_uuid(uuid.uuid4(), version=1) def test_uuid3_valid(self): validate_uuid(uuid.uuid3(uuid.uuid4(), "name")) def test_uuid3_expected_valid(self): validate_uuid(uuid.uuid3(uuid.uuid4(), "name"), version=3) def test_uuid3_expected_invalid(self): with self.assertRaises(ValueError): validate_uuid(uuid.uuid4(), version=3) def test_uuid4_valid(self): validate_uuid(uuid.uuid4()) def test_uuid5_valid(self): validate_uuid(uuid.uuid5(uuid.uuid4(), "name")) def test_rfc4122_valid(self): validate_uuid(uuid.uuid4(), variant=uuid.RFC_4122) def test_microsoft_invalid(self): with self.assertRaises(ValueError): validate_uuid(uuid.uuid4(), variant=uuid.RESERVED_MICROSOFT) def test_incompatible_variant_version(self): with self.assertRaises(ValueError): validate_uuid(variant=uuid.RESERVED_MICROSOFT, version=4) def test_not_required(self): validate_uuid(None, required=False) def test_required(self): with self.assertRaises(TypeError): validate_uuid(None) def test_repr_required_false(self): validator = validate_uuid(required=False) self.assertEqual( repr(validator), 'validate_uuid(required=False)', ) def test_repr_full(self): validator = validate_uuid(variant=uuid.RFC_4122, version=3) self.assertEqual( repr(validator), 'validate_uuid(variant=uuid.RFC_4122, version=3)', )
import unittest import uuid from validation import validate_uuid class ValidateUUIDTestCase(unittest.TestCase): def test_uuid1_valid(self): validate_uuid(uuid.uuid1()) def test_uuid1_expected_valid(self): validate_uuid(uuid.uuid1(), version=1) def test_uuid1_expected_invalid(self): with self.assertRaises(ValueError): validate_uuid(uuid.uuid4(), version=1) def test_uuid3_valid(self): validate_uuid(uuid.uuid3(uuid.uuid4(), "name")) def test_uuid3_expected_valid(self): validate_uuid(uuid.uuid3(uuid.uuid4(), "name"), version=3) def test_uuid3_expected_invalid(self): with self.assertRaises(ValueError): validate_uuid(uuid.uuid4(), version=3) def test_uuid4_valid(self): validate_uuid(uuid.uuid4()) def test_uuid5_valid(self): validate_uuid(uuid.uuid5(uuid.uuid4(), "name")) def test_rfc4122_valid(self): validate_uuid(uuid.uuid4(), variant=uuid.RFC_4122) def test_microsoft_invalid(self): with self.assertRaises(ValueError): validate_uuid(uuid.uuid4(), variant=uuid.RESERVED_MICROSOFT) def test_incompatible_variant_version(self): with self.assertRaises(ValueError): validate_uuid(variant=uuid.RESERVED_MICROSOFT, version=4) def test_not_required(self): validate_uuid(None, required=False) def test_required(self): with self.assertRaises(TypeError): validate_uuid(None) def test_repr_required_false(self): validator = validate_uuid(required=False) self.assertEqual( repr(validator), 'validate_uuid(required=False)', ) def test_repr_full(self): validator = validate_uuid(variant=uuid.RFC_4122, version=3) self.assertEqual( repr(validator), 'validate_uuid(variant=uuid.RFC_4122, version=3)', )
none
1
3.040515
3
scrape.py
dvtate/node-naive-bayes
0
6617581
import newspaper; # https://newspaper.readthedocs.io/en/latest/ import sys; import json; ''' # need to run once to download natural langauge toolkit import nltk nltk.download('punkt') '''; # This script accepts an article url as argument try: article = newspaper.Article(sys.argv[1]); article.download(); article.parse(); nlp = sys.argv[2]; if nlp: article.nlp(); if not article.text: with open("/tmp/badurls.txt", 'a') as f: f.write(sys.argv[1]); print("null"); else: ret = { "text" : article.text, "date" : article.publish_date, }; if nlp: ret["summary"] = article.summary; ret["keywords"] = '\n'.join(article.keywords); print(json.dumps(ret)); except: # null on error with open("/tmp/badurls.txt", 'a') as f: f.write(sys.argv[1]); print("null");
import newspaper; # https://newspaper.readthedocs.io/en/latest/ import sys; import json; ''' # need to run once to download natural langauge toolkit import nltk nltk.download('punkt') '''; # This script accepts an article url as argument try: article = newspaper.Article(sys.argv[1]); article.download(); article.parse(); nlp = sys.argv[2]; if nlp: article.nlp(); if not article.text: with open("/tmp/badurls.txt", 'a') as f: f.write(sys.argv[1]); print("null"); else: ret = { "text" : article.text, "date" : article.publish_date, }; if nlp: ret["summary"] = article.summary; ret["keywords"] = '\n'.join(article.keywords); print(json.dumps(ret)); except: # null on error with open("/tmp/badurls.txt", 'a') as f: f.write(sys.argv[1]); print("null");
en
0.696498
# https://newspaper.readthedocs.io/en/latest/ # need to run once to download natural langauge toolkit import nltk nltk.download('punkt') # This script accepts an article url as argument # null on error
3.114194
3
Bokeh_Apps/Bokeh_CockroachExtracellular.py
neurologic/NeurophysiologyModules
0
6617582
import sys from pathlib import Path import scipy import numpy as np import pandas as pd from scipy import ndimage from scipy.signal import find_peaks from copy import deepcopy import math from bokeh.layouts import layout, row, column, gridplot, widgetbox from bokeh.plotting import figure, show from bokeh.io import output_file, curdoc from bokeh.models import ColumnDataSource, HoverTool, CategoricalColorMapper, Column from bokeh.models import Button, RangeSlider, TextInput, CheckboxGroup from bokeh.models.widgets import Tabs, Panel, Spinner from bokeh.models import MultiLine, Line, Range1d from bokeh.palettes import Spectral6 from bokeh.themes import Theme import yaml ################# # tab 1 import data and explore ################### def button_callback(): sys.exit() # Stop the server # def import_data(attr,old,new): # """ # function called when either filepath or fs are changed # ToDo: check if file size is too big # """ # # filepath = "/Users/kperks/mnt/PerksLab_rstore/neurophysiology_lab/CockroachLeg/CockroachLeg_20K2021-07-04T09_31_20.bin" # # fs = 40000 # print('uploading data... this may take a moment. smile and relax') # f_ = filepath.value.strip() # #file_input is "new" # fs_ = int(fs.value.strip()) # # y_data = np.fromfile(Path(f_), dtype = np.float64) # y_data = y_data - np.median(y_data) # x_data = np.linspace(0,len(y_data)/fs_,len(y_data)) # max_val_slider = len(y_data)/fs_ # data = {'y' : y_data,'x' : x_data} # new_data = ColumnDataSource(data = data) # src_data.data.update(new_data.data) # # range_slider.update(end=max_val_slider) # start_ = 0 #range_slider.value[0] # stop_ = 1 #range_slider.value[1] # range_selected = [start_,stop_] # new_selection = select_data(range_selected) # data_selected.data.update(new_selection.data) # print('data uploaded') def import_data(): """ function called when either filepath or fs are changed ToDo: check if file size is too big """ # filepath = "/Users/kperks/mnt/PerksLab_rstore/neurophysiology_lab/CockroachLeg/CockroachLeg_20K2021-07-04T09_31_20.bin" # fs = 40000 print('uploading data... this may take a moment. smile and relax') f_ = filepath.value.strip() nchan_ = int(nchan.value.strip()) displaychan_ = int(displaychan.value.strip()) # nervechan_ = int(nervechan_.value.strip()) # synapchan_ = int(synapchan_.value.strip()) # simultaneous post and pre synaptic recording so two channels # nchan_=2 #file_input is "new" fs_ = int(fs.value.strip()) y_data = np.fromfile(Path(f_), dtype = np.float64) y_data = y_data.reshape(-1,nchan_) y_data = y_data[:,displaychan_] # 1 channel y_data = y_data - np.median(y_data,0) x_data = np.linspace(0,np.shape(y_data)[0]/fs_,np.shape(y_data)[0]) max_val_slider = len(y_data)/fs_ data = {'y' : y_data, 'x' : x_data} # data = {'y_syn' : y_data[:,synapchan_], 'y_nerve' : ydata[:,nervechan_], 'x' : x_data} new_data = ColumnDataSource(data = data) src_data.data.update(new_data.data) range_slider.update(end=max_val_slider) start_ = 0 #range_slider.value[0] stop_ = 1 #range_slider.value[1] range_selected = [start_,stop_] range_slider.update(value=(start_,stop_)) new_selection = select_data(range_selected) data_selected.data.update(new_selection.data) print('data uploaded') def select_data(range_selected): fs_ = int(fs.value.strip()) y = src_data.data['y'][int(range_selected[0]*fs_):int(range_selected[1]*fs_)] x = src_data.data['x'][int(range_selected[0]*fs_):int(range_selected[1]*fs_)] data = {'y' : y, 'x' : x} return ColumnDataSource(data = data) # def update_plot1_slider(attr,old,new): # start_ = range_slider.value[0] # end_ = range_slider.value[1] # new_selection = select_data([start_,end_]) # data_selected.data.update(new_selection.data) def button_plot_range_callback(): print('processing range') start_ = range_slider.value[0] end_ = range_slider.value[1] new_selection = select_data([start_,end_]) data_selected.data.update(new_selection.data) print('plot updated') # create exit button button_exit = Button(label="Exit", button_type="success",width=100) button_exit.on_click(button_callback) # PathToFile = "/Users/kperks/OneDrive - wesleyan.edu/Teaching/Neurophysiology/Data/CockroachSensoryPhysiology/40kHz/RepeatedStimulation2021-08-27T18_37_10.bin" # filepath = TextInput(title="path to data file",value="PathToFile",width=800) filepath = TextInput(title="path to data file",value="PathToFile",width=800) # create import data button button_import_data = Button(label="Import Data", button_type="success",width=100) button_import_data.on_click(import_data) # create plot range button button_plot_range = Button(label="Plot X-Range", button_type="success",width=100) button_plot_range.on_click(button_plot_range_callback) # create text inpot for sampling rate fs = TextInput(title="sampling rate",value='30000',width=100) # flexible number of channels recorded in case also did intracell nchan = TextInput(title="number of channels recorded in Bonsai",value='1',width=100) displaychan = TextInput(title="which channel to display/analyze",value='0',width=100) # create hover tool hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) # create figure p = figure(plot_width=1000, plot_height=500, tools=[hover,'pan','box_zoom','wheel_zoom','reset','save'], x_axis_label = 'seconds',y_axis_label='Volts') p.xaxis.major_label_text_font_size = "18pt" p.xaxis.axis_label_text_font_size = "18pt" p.yaxis.major_label_text_font_size = "18pt" p.yaxis.axis_label_text_font_size = "18pt" # initialize a range to plot range_selected = [0,1] # create range slider range_slider = RangeSlider( title="Adjust x-axis range", # a title to display above the slider start=0, # set the minimum value for the slider end=1, # set the maximum value for the slider step=1, # increments for the slider value=(range_selected[0],range_selected[1]), # initial values for slider (range_selected[0],range_selected[1]) width=800 ) # initialize data data = {'x':[],'y':[]} src_data = ColumnDataSource(data) data_selected = ColumnDataSource(data) # plot data within selected range as line line = p.line('x','y',source=data_selected,line_color='black') # collect controls controls = [button_exit,fs,filepath,nchan,displaychan,button_import_data,range_slider,button_plot_range] # layout controls inputs = column(*controls) # show(column(range_slider,p)) # layout all elements together l = column(inputs, p) # create tab tab1 = Panel(child=l,title='import data and explore') ################# # tab 2 overlay trials ################# # def button_saveas(): # #sys.exit() # Stop the server # print('will save dataframe for overlaid data when add function') # # convert ColumnDataSource to dataframe # # save dataframe as h5 ''' spont 0.5,1,1.5,2,2.5,3,3.5,4,24.5,24 tarsa 6.38,7.972,9.666,11.432,13.024,14.746,16.52,18.38,20.246,22.4 barb 26.725,28.926,30.818,32.649,34.561,36.446,38.366,40.305,42.442,44.325 ''' def update_overlay(): fs_ = int(fs.value.strip()) filtert = int(0.01*fs_) offset_t = 0.5 # 500 msec include a pre-stimulus onset windur = float(trial_duration2.value.strip()) # sec trial_times_ = trial_times2.value.split(',') trial_times_ = [float(t) for t in trial_times_] #create a new dictionary to store new data to plot temporarily datamat={'ys':[],'xs':[]} xtime = np.linspace(-offset_t,windur,int((windur+offset_t)*fs_)) ys = [] xs = [] for i,t in enumerate(trial_times_): xs.append(xtime) win0 = int((t-offset_t)*fs_) win1 = win0+int((windur+offset_t)*fs_) y = src_data.data['y'][win0:win1] y = y - np.mean(y) y = np.abs(y) y = ndimage.gaussian_filter(y,filtert) ys.append(y) datamat['ys'] = ys datamat['xs'] = xs data_overlay.data = datamat if do_average.active: if do_average.active[0]==0: data_mean.data = {'x':np.mean(np.asarray(xs),0),'y':np.mean(np.asarray(ys),0)} if not do_average.active: data_mean.data = {'x':[],'y':[]} ################ #create a new dictionary to store raw data to get spikes ############### # create save button # button_save = Button(label="Save", button_type="success", width=100) # button_save.on_click(button_saveas) # create text input for trial times trial_times2 = TextInput(title="List of Trial Start times (comma-separated; seconds)", width=800) # trial_times2.on_change("value",update_overlay) # check whether to plot overlay or not labels = ["plot average across trials"] do_average = CheckboxGroup(labels=labels, active=[0]) # do_average.on_change("active",update_overlay) # create text input for trial times trial_duration2 = TextInput(title="Duration of plot window (seconds)",value='1', width=100) # trial_duration2.on_change("value",update_overlay) button_update_overlay2 = Button(label="Update Plot", button_type="success",width=100) button_update_overlay2.on_click(update_overlay) ymin = TextInput(title="Duration of plot window (seconds)",value='1', width=100) # hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) p2 = figure(plot_width=1000, plot_height=500, tools=['hover','pan','box_zoom','wheel_zoom','reset','save'], x_axis_label='time from stimulus onset (seconds)',y_axis_label='amplitude (arbitrary units)') p2.xaxis.major_label_text_font_size = "18pt" p2.xaxis.axis_label_text_font_size = "18pt" p2.yaxis.major_label_text_font_size = "18pt" p2.yaxis.axis_label_text_font_size = "18pt" # p2.x_range = Range1d(20, 25) # p2.y_range = Range1d(-0.1, 0.1) # get fs_ from text input on tab1 fs_ = int(fs.value.strip()) # hard-coded values for window duration and offset currently also in update function offset_t = 0.5 # 500 msec include a pre-stimulus onset windur = float(trial_duration2.value.strip()) # sec # initialize xtime xtime = np.linspace(-offset_t,windur,int((windur+offset_t)*fs_)) # initialize data_overlay ColumnDataSource data_overlay = ColumnDataSource(data = { 'ys':[np.zeros(int((windur+offset_t)*fs_))], 'xs':[xtime] }) data_mean = ColumnDataSource(data = { 'y':[np.zeros(int((windur+offset_t)*fs_))], 'x':[xtime] }) # use multiline to plot data glyph = MultiLine(xs='xs',ys='ys') p2.add_glyph(data_overlay,glyph) """ ADD show average rate """ p2.line(x = 'x', y='y',source =data_mean,line_color='red',line_width=4) ######### # is there a way to have each line a different color? --yes with a colors list (needs to change with size datamat) # or when hover on a line it is highlighted? ######### # collect controls and layout all elements controls2 = [trial_times2,do_average,trial_duration2,button_update_overlay2]#,button_save] inputs2 = column(*controls2) l2 = column(inputs2,p2) # create tab tab2 = Panel(child=l2,title='overlay trials') ################ # tab3 spike counts and raster ################# # def button_expfit(): # #sys.exit() # Stop the server # print('will do an exponential fit on selected data') def update_plot3(): print('calculating average rate and updating plots') fs_ = int(fs.value.strip()) filtert = int(0.01*fs_) offset_t = 0.5 # 500 msec include a pre-stimulus onset windur = float(trial_duration3.value.strip()) # sec trial_times_ = trial_times3.value.split(',') #trial times list text input must not have spaces trial_times_ = [float(t) for t in trial_times_] spk_thresh_ = float(spk_thresh3.value.strip()) xtime = np.linspace(-offset_t,windur,int((windur+offset_t)*fs_)) win0 = int((trial_times_[0]-offset_t)*fs_) win1 = win0+int((windur+offset_t)*fs_) y = deepcopy(dict(src_data.data)['y'][win0:win1]) data_trial3.data = {'x' : xtime,'y' : deepcopy(dict(src_data.data)['y'][win0:win1])} y[y<=spk_thresh_] = 0 #find peaks of y peaks,props = find_peaks(y,distance = int(fs_*0.0005)) peak_t = np.asarray([xtime[p] for p in peaks]) data_spktimes3.data = {'x':peak_t,'y':np.zeros(len(peak_t))} #create a new dictionary to store new data to plot temporarily datamat={'y':[],'x':[]} binsize = float(bin_size3.value.strip()) spks = [] for i,t in enumerate(trial_times_): win0 = int(t*fs_) win1 = win0+int((windur+offset_t)*fs_) y = deepcopy(dict(src_data.data)['y'][win0:win1]) xtime = deepcopy(dict(src_data.data)['x'][win0:win1])-t y[y<=spk_thresh_] = 0 #find peaks of y peaks,props = find_peaks(y,distance = int(fs_*0.0005)) peak_t = np.asarray([xtime[p] for p in peaks]) spks.extend(peak_t) bins = np.arange(0,windur+binsize,binsize) h,bin_edges = np.histogram(spks,bins) avg_rate_response = h/binsize/len(trial_times_) #(number of spikes per bin divided by duration of bin divided by number of trials) #now get hist for pre-stim spks = [] for i,t in enumerate(trial_times_): win0 = int((t-offset_t)*fs_) win1 = int(t*fs_) y = deepcopy(dict(src_data.data)['y'][win0:win1]) xtime = deepcopy(dict(src_data.data)['x'][win0:win1])-t y[y<=spk_thresh_] = 0 peaks,props = find_peaks(y,distance = int(fs_*0.0005)) peak_t = np.asarray([xtime[p] for p in peaks]) spks.extend(peak_t) bins = np.arange(-offset_t,0+binsize,binsize) h,bin_edges_base = np.histogram(spks,bins) avg_rate_base = h/binsize/len(trial_times_) print('n trials = ') print(len(trial_times_)) print('n spks baseline = ') print(len(peak_t)) datamat['y'] = np.concatenate([avg_rate_base,avg_rate_response]) #np.asarray(ys) # datamat['x'] = bins[0:-1] #np.asarray(xs) datamat['x'] = np.concatenate([bin_edges_base[0:-1],bin_edges[0:-1]]) data_scatter.data = datamat ydr3.start= -10 ydr3.end= np.max(np.concatenate([avg_rate_base,avg_rate_response]))+10 xdr3.start = -offset_t xdr3.end = windur xdr3 = Range1d(start=-0.5,end=1) ydr3 = Range1d(start=-10,end=1000) hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) # create figure for tab 3 p3 = figure(plot_width=1000, plot_height=500, tools=[hover,'pan','box_zoom','lasso_select','wheel_zoom','reset','save'], y_axis_label='average spike rate per bin', x_axis_label='time from stimulus onset (seconds)', x_range=xdr3,y_range=ydr3) p3.xaxis.major_label_text_font_size = "18pt" p3.xaxis.axis_label_text_font_size = "18pt" p3.yaxis.major_label_text_font_size = "18pt" p3.yaxis.axis_label_text_font_size = "18pt" # hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) p3b = figure(plot_width=1000, plot_height=500, tools=[hover,'pan','box_zoom','wheel_zoom','reset','save'], title = 'Example trial (first trial listed) to show spike detection', x_axis_label = 'seconds') p3b.xaxis.major_label_text_font_size = "18pt" p3b.xaxis.axis_label_text_font_size = "18pt" p3b.yaxis.major_label_text_font_size = "18pt" p3b.yaxis.axis_label_text_font_size = "18pt" # create exp fit button # button_dofit = Button(label="Fit Data", button_type="success", width=100) # button_dofit.on_click(button_expfit) # create text input for trial duration trial_duration3 = TextInput(title="Duration of plot window",value='1', width=100) # trial_duration3.on_change("value",update_plot3) # create text input for trial times trial_times3 = TextInput(title="List of Trial Start times (comma-separated no spaces; seconds)", width=800) # trial_times3.on_change("value",update_plot3) # create text input for spike threshold spk_thresh3 = TextInput(title="Spike Threshold (from examining raw data; seconds)",value='0.04', width=100) # spk_thresh3.on_change("value",update_plot3) # create text input for bin size of histogram for spike rate bin_size3 = TextInput(title="Bin Width to Calculate Spike Rate (seconds)",value='0.01', width=100) # bin_size3.on_change("value",update_plot3) button_update_plot3 = Button(label="Update Plot", button_type="success",width=100) button_update_plot3.on_click(update_plot3) # hard-coded values for offset currently also in update function offset_t = 0.5 # 500 msec include a pre-stimulus onset windur = float(trial_duration3.value.strip()) # sec # initialize xtime xtime = np.linspace(-offset_t,windur,int((windur+offset_t)*fs_)) # initialize data_overlay ColumnDataSource data_scatter = ColumnDataSource(data = { 'y':np.zeros(int((windur+offset_t)*fs_)), 'x':xtime }) # use multiline to plot data p3.circle(x='x',y='y',source=data_scatter,color='black') # initialize data_overlay ColumnDataSource data_trial3 = ColumnDataSource(data = { 'y':[np.zeros(int((windur+offset_t)*fs_))], 'x':[xtime] }) data_spktimes3 = ColumnDataSource(data = { 'y':np.zeros(int((windur+offset_t)*fs_)), 'x':xtime }) # use multiline to plot data p3b.line(x = 'x', y='y',source =data_trial3,line_color='black') p3b.circle(x='x',y='y',source=data_spktimes3,color='red',size=6,level='overlay') # collect controls and layout all elements controls3 = [trial_times3,trial_duration3,spk_thresh3,bin_size3,button_update_plot3]#,button_dofit] inputs3 = column(*controls3) l3 = column(inputs3,p3,p3b) # create tab tab3 = Panel(child=l3,title='Spiking Response Histogram') ############# # tab 4 scroll through spike waveforms ############# def update_data4(attr,old,new): """ creates a matrix with each column a different spike waveform rows are time/samples """ fs_ = int(fs.value.strip()) # filtert = int(0.01*fs_) windur = float(plot_duration4.value.strip()) print('windur =' ) print(windur) offset_t = windur/2 # 500 msec include a pre-stimulus onset trialdur = float(trial_duration4.value.strip()) # sec print('trialdur =') print(trialdur) trial_times_ = trial_times4.value.split(',') #trial times list text input must not have spaces trial_times_ = [float(t) for t in trial_times_] print('trial_times_= ') print(trial_times_) spk_thresh_ = float(spk_thresh4.value.strip()) print('spk_thresh_ =') print(spk_thresh_) #create a new dictionary to store new data to plot temporarily datamat={'ys':[]} xtime = np.linspace(0,trialdur,int((trialdur)*fs_)) ys = [] xs = [] spks = [] for i,t in enumerate(trial_times_): win0 = int(t*fs_) win1 = win0+int(trialdur*fs_) y_trial = dict(src_data.data)['y'][win0:win1] y = deepcopy(dict(src_data.data)['y'][win0:win1]) y[y<=spk_thresh_] = 0 #find peaks of y peaks,props = find_peaks(y,distance = int(fs_*0.0005)) peak_t = np.asarray([xtime[p] for p in peaks]) spks.extend(peak_t) for j,s in enumerate(spks): if (((s-offset_t)>0) & ((s+offset_t)<trialdur)): win0_ = int((s-offset_t)*fs_) win1_ = int((s+offset_t)*fs_) ys.append(deepcopy(y_trial[win0_:win1_])) datamat['ys'] = np.asarray(ys).T data_spikes.data = datamat print('total spikes = len(ys) = ' + str(len(ys))) data_plot4.data = { 'x':np.linspace(-offset_t*1000,offset_t*1000,int((windur)*fs_)), 'y':deepcopy(dict(data_spikes.data)['ys'][:,0]) } plot_spike4.update(high=len(ys)-1) plot_spike4.update(value=0) spklist4.update(value='0') spklist4.update(value='all') def update_plot4(attr,old,new): spknum = int(plot_spike4.value) print('spike number being plotted = ' + str(spknum)) x = data_plot4.data['x'] data_plot4.data = { 'x':x, 'y':deepcopy(dict(data_spikes.data))['ys'][:,spknum]} def update_overlay4(attr,old,new): print('overlaying specified spikes') spks_to_overlay = spklist4.value.split(',') print(spks_to_overlay) if spks_to_overlay[0]=='all': spks_to_overlay = np.arange(np.shape(data_spikes.data['ys'])[1]) print('plotting all spikes; total number = ') print(np.shape(data_spikes.data['ys'])[1]) spks_to_overlay = [int(i) for i in spks_to_overlay] x = data_plot4.data['x'] xs = [] ys = [] for i in spks_to_overlay: xs.append(x) ys.append(deepcopy(dict(data_spikes.data))['ys'][:,i]) print('updating overlay data') data_overlay4.data = { 'xs':xs, 'ys':ys } print('updating mean of overlay') data_overlay4_mean.data = { 'x':x, 'y':np.mean(np.asarray(ys),0) } # create figure for tab 4 p4 = figure(plot_width=1000, plot_height=500, tools=['hover','pan','box_zoom','wheel_zoom','reset','save'], y_axis_label='V', x_axis_label='time from spike peak (milliseconds)', title='individual spikes') # create figure 2 for tab 4 p4b = figure(plot_width=1000, plot_height=500, tools=['hover','pan','box_zoom','wheel_zoom','reset','save'], y_axis_label='V', x_axis_label='time from spike peak (milliseconds)', title='overlay of chosen spikes') # create text input for trial duration trial_duration4 = TextInput(title="Duration of trials (seconds)",value='1', width=100) trial_duration4.on_change("value",update_data4) # create text input for trial times trial_times4 = TextInput(title="List of Trial Start times (comma-separated no spaces; seconds)", width=800) trial_times4.on_change("value",update_data4) # create text input for spike threshold spk_thresh4 = TextInput(title="Spike Threshold (from examining raw data; seconds)",value='0.05', width=100) spk_thresh4.on_change("value",update_data4) plot_duration4 = TextInput(title="Duration of analysis window (seconds)",value='0.005', width=100) plot_duration4.on_change("value",update_data4) plot_spike4 = Spinner(title="spike number to plot",low=0, width=100) plot_spike4.on_change("value",update_plot4) spklist4 = TextInput(title="spike indices to overlay (either 'all' or a comma-separated list of indices", value='all', width=800) spklist4.on_change("value",update_overlay4) # create a button to export current waveform to concatenate with an h5 file """ each waveform exported must have same duration """ # button_exportTOh5 = Button(label="Export Waveform", button_type="success") # button_exportTOh5.on_click(button_exportwaveformtoh5file) # initialize data_spikes ColumnDataSource - spike datamat to plot from data_spikes = ColumnDataSource(data = { 'ys':[] }) # initialize data_plot4 ColumnDataSource - spike waveform to plot data_plot4 = ColumnDataSource(data = { 'y':[], 'x':[] }) data_overlay4 = ColumnDataSource(data = { 'xs':[], 'ys':[] }) data_overlay4_mean = ColumnDataSource(data = { 'y':[], 'x':[] }) # initialize line plot for spike waveform p4.line(x = 'x', y='y',source =data_plot4,line_color='black') # initialize line plot for all spikes overlay # use multiline to plot data glyph = MultiLine(xs='xs',ys='ys') p4b.add_glyph(data_overlay4,glyph) """ ADD show average rate """ p4b.line(x = 'x', y='y',source =data_overlay4_mean,line_color='orange',line_width=6,alpha=0.5) # collect controls and layout all elements controls4 = [trial_times4,trial_duration4,spk_thresh4,plot_duration4,plot_spike4,spklist4] inputs4 = column(*controls4) l4 = column(inputs4,p4,p4b) # create tab tab4 = Panel(child=l4,title='plot spike waveforms') ####### # create tabs layout ###### tabs = Tabs(tabs=[tab1,tab2,tab3,tab4]) curdoc().add_root(tabs)
import sys from pathlib import Path import scipy import numpy as np import pandas as pd from scipy import ndimage from scipy.signal import find_peaks from copy import deepcopy import math from bokeh.layouts import layout, row, column, gridplot, widgetbox from bokeh.plotting import figure, show from bokeh.io import output_file, curdoc from bokeh.models import ColumnDataSource, HoverTool, CategoricalColorMapper, Column from bokeh.models import Button, RangeSlider, TextInput, CheckboxGroup from bokeh.models.widgets import Tabs, Panel, Spinner from bokeh.models import MultiLine, Line, Range1d from bokeh.palettes import Spectral6 from bokeh.themes import Theme import yaml ################# # tab 1 import data and explore ################### def button_callback(): sys.exit() # Stop the server # def import_data(attr,old,new): # """ # function called when either filepath or fs are changed # ToDo: check if file size is too big # """ # # filepath = "/Users/kperks/mnt/PerksLab_rstore/neurophysiology_lab/CockroachLeg/CockroachLeg_20K2021-07-04T09_31_20.bin" # # fs = 40000 # print('uploading data... this may take a moment. smile and relax') # f_ = filepath.value.strip() # #file_input is "new" # fs_ = int(fs.value.strip()) # # y_data = np.fromfile(Path(f_), dtype = np.float64) # y_data = y_data - np.median(y_data) # x_data = np.linspace(0,len(y_data)/fs_,len(y_data)) # max_val_slider = len(y_data)/fs_ # data = {'y' : y_data,'x' : x_data} # new_data = ColumnDataSource(data = data) # src_data.data.update(new_data.data) # # range_slider.update(end=max_val_slider) # start_ = 0 #range_slider.value[0] # stop_ = 1 #range_slider.value[1] # range_selected = [start_,stop_] # new_selection = select_data(range_selected) # data_selected.data.update(new_selection.data) # print('data uploaded') def import_data(): """ function called when either filepath or fs are changed ToDo: check if file size is too big """ # filepath = "/Users/kperks/mnt/PerksLab_rstore/neurophysiology_lab/CockroachLeg/CockroachLeg_20K2021-07-04T09_31_20.bin" # fs = 40000 print('uploading data... this may take a moment. smile and relax') f_ = filepath.value.strip() nchan_ = int(nchan.value.strip()) displaychan_ = int(displaychan.value.strip()) # nervechan_ = int(nervechan_.value.strip()) # synapchan_ = int(synapchan_.value.strip()) # simultaneous post and pre synaptic recording so two channels # nchan_=2 #file_input is "new" fs_ = int(fs.value.strip()) y_data = np.fromfile(Path(f_), dtype = np.float64) y_data = y_data.reshape(-1,nchan_) y_data = y_data[:,displaychan_] # 1 channel y_data = y_data - np.median(y_data,0) x_data = np.linspace(0,np.shape(y_data)[0]/fs_,np.shape(y_data)[0]) max_val_slider = len(y_data)/fs_ data = {'y' : y_data, 'x' : x_data} # data = {'y_syn' : y_data[:,synapchan_], 'y_nerve' : ydata[:,nervechan_], 'x' : x_data} new_data = ColumnDataSource(data = data) src_data.data.update(new_data.data) range_slider.update(end=max_val_slider) start_ = 0 #range_slider.value[0] stop_ = 1 #range_slider.value[1] range_selected = [start_,stop_] range_slider.update(value=(start_,stop_)) new_selection = select_data(range_selected) data_selected.data.update(new_selection.data) print('data uploaded') def select_data(range_selected): fs_ = int(fs.value.strip()) y = src_data.data['y'][int(range_selected[0]*fs_):int(range_selected[1]*fs_)] x = src_data.data['x'][int(range_selected[0]*fs_):int(range_selected[1]*fs_)] data = {'y' : y, 'x' : x} return ColumnDataSource(data = data) # def update_plot1_slider(attr,old,new): # start_ = range_slider.value[0] # end_ = range_slider.value[1] # new_selection = select_data([start_,end_]) # data_selected.data.update(new_selection.data) def button_plot_range_callback(): print('processing range') start_ = range_slider.value[0] end_ = range_slider.value[1] new_selection = select_data([start_,end_]) data_selected.data.update(new_selection.data) print('plot updated') # create exit button button_exit = Button(label="Exit", button_type="success",width=100) button_exit.on_click(button_callback) # PathToFile = "/Users/kperks/OneDrive - wesleyan.edu/Teaching/Neurophysiology/Data/CockroachSensoryPhysiology/40kHz/RepeatedStimulation2021-08-27T18_37_10.bin" # filepath = TextInput(title="path to data file",value="PathToFile",width=800) filepath = TextInput(title="path to data file",value="PathToFile",width=800) # create import data button button_import_data = Button(label="Import Data", button_type="success",width=100) button_import_data.on_click(import_data) # create plot range button button_plot_range = Button(label="Plot X-Range", button_type="success",width=100) button_plot_range.on_click(button_plot_range_callback) # create text inpot for sampling rate fs = TextInput(title="sampling rate",value='30000',width=100) # flexible number of channels recorded in case also did intracell nchan = TextInput(title="number of channels recorded in Bonsai",value='1',width=100) displaychan = TextInput(title="which channel to display/analyze",value='0',width=100) # create hover tool hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) # create figure p = figure(plot_width=1000, plot_height=500, tools=[hover,'pan','box_zoom','wheel_zoom','reset','save'], x_axis_label = 'seconds',y_axis_label='Volts') p.xaxis.major_label_text_font_size = "18pt" p.xaxis.axis_label_text_font_size = "18pt" p.yaxis.major_label_text_font_size = "18pt" p.yaxis.axis_label_text_font_size = "18pt" # initialize a range to plot range_selected = [0,1] # create range slider range_slider = RangeSlider( title="Adjust x-axis range", # a title to display above the slider start=0, # set the minimum value for the slider end=1, # set the maximum value for the slider step=1, # increments for the slider value=(range_selected[0],range_selected[1]), # initial values for slider (range_selected[0],range_selected[1]) width=800 ) # initialize data data = {'x':[],'y':[]} src_data = ColumnDataSource(data) data_selected = ColumnDataSource(data) # plot data within selected range as line line = p.line('x','y',source=data_selected,line_color='black') # collect controls controls = [button_exit,fs,filepath,nchan,displaychan,button_import_data,range_slider,button_plot_range] # layout controls inputs = column(*controls) # show(column(range_slider,p)) # layout all elements together l = column(inputs, p) # create tab tab1 = Panel(child=l,title='import data and explore') ################# # tab 2 overlay trials ################# # def button_saveas(): # #sys.exit() # Stop the server # print('will save dataframe for overlaid data when add function') # # convert ColumnDataSource to dataframe # # save dataframe as h5 ''' spont 0.5,1,1.5,2,2.5,3,3.5,4,24.5,24 tarsa 6.38,7.972,9.666,11.432,13.024,14.746,16.52,18.38,20.246,22.4 barb 26.725,28.926,30.818,32.649,34.561,36.446,38.366,40.305,42.442,44.325 ''' def update_overlay(): fs_ = int(fs.value.strip()) filtert = int(0.01*fs_) offset_t = 0.5 # 500 msec include a pre-stimulus onset windur = float(trial_duration2.value.strip()) # sec trial_times_ = trial_times2.value.split(',') trial_times_ = [float(t) for t in trial_times_] #create a new dictionary to store new data to plot temporarily datamat={'ys':[],'xs':[]} xtime = np.linspace(-offset_t,windur,int((windur+offset_t)*fs_)) ys = [] xs = [] for i,t in enumerate(trial_times_): xs.append(xtime) win0 = int((t-offset_t)*fs_) win1 = win0+int((windur+offset_t)*fs_) y = src_data.data['y'][win0:win1] y = y - np.mean(y) y = np.abs(y) y = ndimage.gaussian_filter(y,filtert) ys.append(y) datamat['ys'] = ys datamat['xs'] = xs data_overlay.data = datamat if do_average.active: if do_average.active[0]==0: data_mean.data = {'x':np.mean(np.asarray(xs),0),'y':np.mean(np.asarray(ys),0)} if not do_average.active: data_mean.data = {'x':[],'y':[]} ################ #create a new dictionary to store raw data to get spikes ############### # create save button # button_save = Button(label="Save", button_type="success", width=100) # button_save.on_click(button_saveas) # create text input for trial times trial_times2 = TextInput(title="List of Trial Start times (comma-separated; seconds)", width=800) # trial_times2.on_change("value",update_overlay) # check whether to plot overlay or not labels = ["plot average across trials"] do_average = CheckboxGroup(labels=labels, active=[0]) # do_average.on_change("active",update_overlay) # create text input for trial times trial_duration2 = TextInput(title="Duration of plot window (seconds)",value='1', width=100) # trial_duration2.on_change("value",update_overlay) button_update_overlay2 = Button(label="Update Plot", button_type="success",width=100) button_update_overlay2.on_click(update_overlay) ymin = TextInput(title="Duration of plot window (seconds)",value='1', width=100) # hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) p2 = figure(plot_width=1000, plot_height=500, tools=['hover','pan','box_zoom','wheel_zoom','reset','save'], x_axis_label='time from stimulus onset (seconds)',y_axis_label='amplitude (arbitrary units)') p2.xaxis.major_label_text_font_size = "18pt" p2.xaxis.axis_label_text_font_size = "18pt" p2.yaxis.major_label_text_font_size = "18pt" p2.yaxis.axis_label_text_font_size = "18pt" # p2.x_range = Range1d(20, 25) # p2.y_range = Range1d(-0.1, 0.1) # get fs_ from text input on tab1 fs_ = int(fs.value.strip()) # hard-coded values for window duration and offset currently also in update function offset_t = 0.5 # 500 msec include a pre-stimulus onset windur = float(trial_duration2.value.strip()) # sec # initialize xtime xtime = np.linspace(-offset_t,windur,int((windur+offset_t)*fs_)) # initialize data_overlay ColumnDataSource data_overlay = ColumnDataSource(data = { 'ys':[np.zeros(int((windur+offset_t)*fs_))], 'xs':[xtime] }) data_mean = ColumnDataSource(data = { 'y':[np.zeros(int((windur+offset_t)*fs_))], 'x':[xtime] }) # use multiline to plot data glyph = MultiLine(xs='xs',ys='ys') p2.add_glyph(data_overlay,glyph) """ ADD show average rate """ p2.line(x = 'x', y='y',source =data_mean,line_color='red',line_width=4) ######### # is there a way to have each line a different color? --yes with a colors list (needs to change with size datamat) # or when hover on a line it is highlighted? ######### # collect controls and layout all elements controls2 = [trial_times2,do_average,trial_duration2,button_update_overlay2]#,button_save] inputs2 = column(*controls2) l2 = column(inputs2,p2) # create tab tab2 = Panel(child=l2,title='overlay trials') ################ # tab3 spike counts and raster ################# # def button_expfit(): # #sys.exit() # Stop the server # print('will do an exponential fit on selected data') def update_plot3(): print('calculating average rate and updating plots') fs_ = int(fs.value.strip()) filtert = int(0.01*fs_) offset_t = 0.5 # 500 msec include a pre-stimulus onset windur = float(trial_duration3.value.strip()) # sec trial_times_ = trial_times3.value.split(',') #trial times list text input must not have spaces trial_times_ = [float(t) for t in trial_times_] spk_thresh_ = float(spk_thresh3.value.strip()) xtime = np.linspace(-offset_t,windur,int((windur+offset_t)*fs_)) win0 = int((trial_times_[0]-offset_t)*fs_) win1 = win0+int((windur+offset_t)*fs_) y = deepcopy(dict(src_data.data)['y'][win0:win1]) data_trial3.data = {'x' : xtime,'y' : deepcopy(dict(src_data.data)['y'][win0:win1])} y[y<=spk_thresh_] = 0 #find peaks of y peaks,props = find_peaks(y,distance = int(fs_*0.0005)) peak_t = np.asarray([xtime[p] for p in peaks]) data_spktimes3.data = {'x':peak_t,'y':np.zeros(len(peak_t))} #create a new dictionary to store new data to plot temporarily datamat={'y':[],'x':[]} binsize = float(bin_size3.value.strip()) spks = [] for i,t in enumerate(trial_times_): win0 = int(t*fs_) win1 = win0+int((windur+offset_t)*fs_) y = deepcopy(dict(src_data.data)['y'][win0:win1]) xtime = deepcopy(dict(src_data.data)['x'][win0:win1])-t y[y<=spk_thresh_] = 0 #find peaks of y peaks,props = find_peaks(y,distance = int(fs_*0.0005)) peak_t = np.asarray([xtime[p] for p in peaks]) spks.extend(peak_t) bins = np.arange(0,windur+binsize,binsize) h,bin_edges = np.histogram(spks,bins) avg_rate_response = h/binsize/len(trial_times_) #(number of spikes per bin divided by duration of bin divided by number of trials) #now get hist for pre-stim spks = [] for i,t in enumerate(trial_times_): win0 = int((t-offset_t)*fs_) win1 = int(t*fs_) y = deepcopy(dict(src_data.data)['y'][win0:win1]) xtime = deepcopy(dict(src_data.data)['x'][win0:win1])-t y[y<=spk_thresh_] = 0 peaks,props = find_peaks(y,distance = int(fs_*0.0005)) peak_t = np.asarray([xtime[p] for p in peaks]) spks.extend(peak_t) bins = np.arange(-offset_t,0+binsize,binsize) h,bin_edges_base = np.histogram(spks,bins) avg_rate_base = h/binsize/len(trial_times_) print('n trials = ') print(len(trial_times_)) print('n spks baseline = ') print(len(peak_t)) datamat['y'] = np.concatenate([avg_rate_base,avg_rate_response]) #np.asarray(ys) # datamat['x'] = bins[0:-1] #np.asarray(xs) datamat['x'] = np.concatenate([bin_edges_base[0:-1],bin_edges[0:-1]]) data_scatter.data = datamat ydr3.start= -10 ydr3.end= np.max(np.concatenate([avg_rate_base,avg_rate_response]))+10 xdr3.start = -offset_t xdr3.end = windur xdr3 = Range1d(start=-0.5,end=1) ydr3 = Range1d(start=-10,end=1000) hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) # create figure for tab 3 p3 = figure(plot_width=1000, plot_height=500, tools=[hover,'pan','box_zoom','lasso_select','wheel_zoom','reset','save'], y_axis_label='average spike rate per bin', x_axis_label='time from stimulus onset (seconds)', x_range=xdr3,y_range=ydr3) p3.xaxis.major_label_text_font_size = "18pt" p3.xaxis.axis_label_text_font_size = "18pt" p3.yaxis.major_label_text_font_size = "18pt" p3.yaxis.axis_label_text_font_size = "18pt" # hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) p3b = figure(plot_width=1000, plot_height=500, tools=[hover,'pan','box_zoom','wheel_zoom','reset','save'], title = 'Example trial (first trial listed) to show spike detection', x_axis_label = 'seconds') p3b.xaxis.major_label_text_font_size = "18pt" p3b.xaxis.axis_label_text_font_size = "18pt" p3b.yaxis.major_label_text_font_size = "18pt" p3b.yaxis.axis_label_text_font_size = "18pt" # create exp fit button # button_dofit = Button(label="Fit Data", button_type="success", width=100) # button_dofit.on_click(button_expfit) # create text input for trial duration trial_duration3 = TextInput(title="Duration of plot window",value='1', width=100) # trial_duration3.on_change("value",update_plot3) # create text input for trial times trial_times3 = TextInput(title="List of Trial Start times (comma-separated no spaces; seconds)", width=800) # trial_times3.on_change("value",update_plot3) # create text input for spike threshold spk_thresh3 = TextInput(title="Spike Threshold (from examining raw data; seconds)",value='0.04', width=100) # spk_thresh3.on_change("value",update_plot3) # create text input for bin size of histogram for spike rate bin_size3 = TextInput(title="Bin Width to Calculate Spike Rate (seconds)",value='0.01', width=100) # bin_size3.on_change("value",update_plot3) button_update_plot3 = Button(label="Update Plot", button_type="success",width=100) button_update_plot3.on_click(update_plot3) # hard-coded values for offset currently also in update function offset_t = 0.5 # 500 msec include a pre-stimulus onset windur = float(trial_duration3.value.strip()) # sec # initialize xtime xtime = np.linspace(-offset_t,windur,int((windur+offset_t)*fs_)) # initialize data_overlay ColumnDataSource data_scatter = ColumnDataSource(data = { 'y':np.zeros(int((windur+offset_t)*fs_)), 'x':xtime }) # use multiline to plot data p3.circle(x='x',y='y',source=data_scatter,color='black') # initialize data_overlay ColumnDataSource data_trial3 = ColumnDataSource(data = { 'y':[np.zeros(int((windur+offset_t)*fs_))], 'x':[xtime] }) data_spktimes3 = ColumnDataSource(data = { 'y':np.zeros(int((windur+offset_t)*fs_)), 'x':xtime }) # use multiline to plot data p3b.line(x = 'x', y='y',source =data_trial3,line_color='black') p3b.circle(x='x',y='y',source=data_spktimes3,color='red',size=6,level='overlay') # collect controls and layout all elements controls3 = [trial_times3,trial_duration3,spk_thresh3,bin_size3,button_update_plot3]#,button_dofit] inputs3 = column(*controls3) l3 = column(inputs3,p3,p3b) # create tab tab3 = Panel(child=l3,title='Spiking Response Histogram') ############# # tab 4 scroll through spike waveforms ############# def update_data4(attr,old,new): """ creates a matrix with each column a different spike waveform rows are time/samples """ fs_ = int(fs.value.strip()) # filtert = int(0.01*fs_) windur = float(plot_duration4.value.strip()) print('windur =' ) print(windur) offset_t = windur/2 # 500 msec include a pre-stimulus onset trialdur = float(trial_duration4.value.strip()) # sec print('trialdur =') print(trialdur) trial_times_ = trial_times4.value.split(',') #trial times list text input must not have spaces trial_times_ = [float(t) for t in trial_times_] print('trial_times_= ') print(trial_times_) spk_thresh_ = float(spk_thresh4.value.strip()) print('spk_thresh_ =') print(spk_thresh_) #create a new dictionary to store new data to plot temporarily datamat={'ys':[]} xtime = np.linspace(0,trialdur,int((trialdur)*fs_)) ys = [] xs = [] spks = [] for i,t in enumerate(trial_times_): win0 = int(t*fs_) win1 = win0+int(trialdur*fs_) y_trial = dict(src_data.data)['y'][win0:win1] y = deepcopy(dict(src_data.data)['y'][win0:win1]) y[y<=spk_thresh_] = 0 #find peaks of y peaks,props = find_peaks(y,distance = int(fs_*0.0005)) peak_t = np.asarray([xtime[p] for p in peaks]) spks.extend(peak_t) for j,s in enumerate(spks): if (((s-offset_t)>0) & ((s+offset_t)<trialdur)): win0_ = int((s-offset_t)*fs_) win1_ = int((s+offset_t)*fs_) ys.append(deepcopy(y_trial[win0_:win1_])) datamat['ys'] = np.asarray(ys).T data_spikes.data = datamat print('total spikes = len(ys) = ' + str(len(ys))) data_plot4.data = { 'x':np.linspace(-offset_t*1000,offset_t*1000,int((windur)*fs_)), 'y':deepcopy(dict(data_spikes.data)['ys'][:,0]) } plot_spike4.update(high=len(ys)-1) plot_spike4.update(value=0) spklist4.update(value='0') spklist4.update(value='all') def update_plot4(attr,old,new): spknum = int(plot_spike4.value) print('spike number being plotted = ' + str(spknum)) x = data_plot4.data['x'] data_plot4.data = { 'x':x, 'y':deepcopy(dict(data_spikes.data))['ys'][:,spknum]} def update_overlay4(attr,old,new): print('overlaying specified spikes') spks_to_overlay = spklist4.value.split(',') print(spks_to_overlay) if spks_to_overlay[0]=='all': spks_to_overlay = np.arange(np.shape(data_spikes.data['ys'])[1]) print('plotting all spikes; total number = ') print(np.shape(data_spikes.data['ys'])[1]) spks_to_overlay = [int(i) for i in spks_to_overlay] x = data_plot4.data['x'] xs = [] ys = [] for i in spks_to_overlay: xs.append(x) ys.append(deepcopy(dict(data_spikes.data))['ys'][:,i]) print('updating overlay data') data_overlay4.data = { 'xs':xs, 'ys':ys } print('updating mean of overlay') data_overlay4_mean.data = { 'x':x, 'y':np.mean(np.asarray(ys),0) } # create figure for tab 4 p4 = figure(plot_width=1000, plot_height=500, tools=['hover','pan','box_zoom','wheel_zoom','reset','save'], y_axis_label='V', x_axis_label='time from spike peak (milliseconds)', title='individual spikes') # create figure 2 for tab 4 p4b = figure(plot_width=1000, plot_height=500, tools=['hover','pan','box_zoom','wheel_zoom','reset','save'], y_axis_label='V', x_axis_label='time from spike peak (milliseconds)', title='overlay of chosen spikes') # create text input for trial duration trial_duration4 = TextInput(title="Duration of trials (seconds)",value='1', width=100) trial_duration4.on_change("value",update_data4) # create text input for trial times trial_times4 = TextInput(title="List of Trial Start times (comma-separated no spaces; seconds)", width=800) trial_times4.on_change("value",update_data4) # create text input for spike threshold spk_thresh4 = TextInput(title="Spike Threshold (from examining raw data; seconds)",value='0.05', width=100) spk_thresh4.on_change("value",update_data4) plot_duration4 = TextInput(title="Duration of analysis window (seconds)",value='0.005', width=100) plot_duration4.on_change("value",update_data4) plot_spike4 = Spinner(title="spike number to plot",low=0, width=100) plot_spike4.on_change("value",update_plot4) spklist4 = TextInput(title="spike indices to overlay (either 'all' or a comma-separated list of indices", value='all', width=800) spklist4.on_change("value",update_overlay4) # create a button to export current waveform to concatenate with an h5 file """ each waveform exported must have same duration """ # button_exportTOh5 = Button(label="Export Waveform", button_type="success") # button_exportTOh5.on_click(button_exportwaveformtoh5file) # initialize data_spikes ColumnDataSource - spike datamat to plot from data_spikes = ColumnDataSource(data = { 'ys':[] }) # initialize data_plot4 ColumnDataSource - spike waveform to plot data_plot4 = ColumnDataSource(data = { 'y':[], 'x':[] }) data_overlay4 = ColumnDataSource(data = { 'xs':[], 'ys':[] }) data_overlay4_mean = ColumnDataSource(data = { 'y':[], 'x':[] }) # initialize line plot for spike waveform p4.line(x = 'x', y='y',source =data_plot4,line_color='black') # initialize line plot for all spikes overlay # use multiline to plot data glyph = MultiLine(xs='xs',ys='ys') p4b.add_glyph(data_overlay4,glyph) """ ADD show average rate """ p4b.line(x = 'x', y='y',source =data_overlay4_mean,line_color='orange',line_width=6,alpha=0.5) # collect controls and layout all elements controls4 = [trial_times4,trial_duration4,spk_thresh4,plot_duration4,plot_spike4,spklist4] inputs4 = column(*controls4) l4 = column(inputs4,p4,p4b) # create tab tab4 = Panel(child=l4,title='plot spike waveforms') ####### # create tabs layout ###### tabs = Tabs(tabs=[tab1,tab2,tab3,tab4]) curdoc().add_root(tabs)
en
0.519811
################# # tab 1 import data and explore ################### # Stop the server # def import_data(attr,old,new): # """ # function called when either filepath or fs are changed # ToDo: check if file size is too big # """ # # filepath = "/Users/kperks/mnt/PerksLab_rstore/neurophysiology_lab/CockroachLeg/CockroachLeg_20K2021-07-04T09_31_20.bin" # # fs = 40000 # print('uploading data... this may take a moment. smile and relax') # f_ = filepath.value.strip() # #file_input is "new" # fs_ = int(fs.value.strip()) # # y_data = np.fromfile(Path(f_), dtype = np.float64) # y_data = y_data - np.median(y_data) # x_data = np.linspace(0,len(y_data)/fs_,len(y_data)) # max_val_slider = len(y_data)/fs_ # data = {'y' : y_data,'x' : x_data} # new_data = ColumnDataSource(data = data) # src_data.data.update(new_data.data) # # range_slider.update(end=max_val_slider) # start_ = 0 #range_slider.value[0] # stop_ = 1 #range_slider.value[1] # range_selected = [start_,stop_] # new_selection = select_data(range_selected) # data_selected.data.update(new_selection.data) # print('data uploaded') function called when either filepath or fs are changed ToDo: check if file size is too big # filepath = "/Users/kperks/mnt/PerksLab_rstore/neurophysiology_lab/CockroachLeg/CockroachLeg_20K2021-07-04T09_31_20.bin" # fs = 40000 # nervechan_ = int(nervechan_.value.strip()) # synapchan_ = int(synapchan_.value.strip()) # simultaneous post and pre synaptic recording so two channels # nchan_=2 #file_input is "new" # 1 channel # data = {'y_syn' : y_data[:,synapchan_], 'y_nerve' : ydata[:,nervechan_], 'x' : x_data} #range_slider.value[0] #range_slider.value[1] # def update_plot1_slider(attr,old,new): # start_ = range_slider.value[0] # end_ = range_slider.value[1] # new_selection = select_data([start_,end_]) # data_selected.data.update(new_selection.data) # create exit button # PathToFile = "/Users/kperks/OneDrive - wesleyan.edu/Teaching/Neurophysiology/Data/CockroachSensoryPhysiology/40kHz/RepeatedStimulation2021-08-27T18_37_10.bin" # filepath = TextInput(title="path to data file",value="PathToFile",width=800) # create import data button # create plot range button # create text inpot for sampling rate # flexible number of channels recorded in case also did intracell # create hover tool # create figure # initialize a range to plot # create range slider # a title to display above the slider # set the minimum value for the slider # set the maximum value for the slider # increments for the slider # initial values for slider (range_selected[0],range_selected[1]) # initialize data # plot data within selected range as line # collect controls # layout controls # show(column(range_slider,p)) # layout all elements together # create tab ################# # tab 2 overlay trials ################# # def button_saveas(): # #sys.exit() # Stop the server # print('will save dataframe for overlaid data when add function') # # convert ColumnDataSource to dataframe # # save dataframe as h5 spont 0.5,1,1.5,2,2.5,3,3.5,4,24.5,24 tarsa 6.38,7.972,9.666,11.432,13.024,14.746,16.52,18.38,20.246,22.4 barb 26.725,28.926,30.818,32.649,34.561,36.446,38.366,40.305,42.442,44.325 # 500 msec include a pre-stimulus onset # sec #create a new dictionary to store new data to plot temporarily ################ #create a new dictionary to store raw data to get spikes ############### # create save button # button_save = Button(label="Save", button_type="success", width=100) # button_save.on_click(button_saveas) # create text input for trial times # trial_times2.on_change("value",update_overlay) # check whether to plot overlay or not # do_average.on_change("active",update_overlay) # create text input for trial times # trial_duration2.on_change("value",update_overlay) # hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) # p2.x_range = Range1d(20, 25) # p2.y_range = Range1d(-0.1, 0.1) # get fs_ from text input on tab1 # hard-coded values for window duration and offset currently also in update function # 500 msec include a pre-stimulus onset # sec # initialize xtime # initialize data_overlay ColumnDataSource # use multiline to plot data ADD show average rate ######### # is there a way to have each line a different color? --yes with a colors list (needs to change with size datamat) # or when hover on a line it is highlighted? ######### # collect controls and layout all elements #,button_save] # create tab ################ # tab3 spike counts and raster ################# # def button_expfit(): # #sys.exit() # Stop the server # print('will do an exponential fit on selected data') # 500 msec include a pre-stimulus onset # sec #trial times list text input must not have spaces #find peaks of y #create a new dictionary to store new data to plot temporarily #find peaks of y #(number of spikes per bin divided by duration of bin divided by number of trials) #now get hist for pre-stim #np.asarray(ys) # datamat['x'] = bins[0:-1] #np.asarray(xs) # create figure for tab 3 # hover = HoverTool(tooltips=[('mV', '@y'), ('time', '@x')]) # create exp fit button # button_dofit = Button(label="Fit Data", button_type="success", width=100) # button_dofit.on_click(button_expfit) # create text input for trial duration # trial_duration3.on_change("value",update_plot3) # create text input for trial times # trial_times3.on_change("value",update_plot3) # create text input for spike threshold # spk_thresh3.on_change("value",update_plot3) # create text input for bin size of histogram for spike rate # bin_size3.on_change("value",update_plot3) # hard-coded values for offset currently also in update function # 500 msec include a pre-stimulus onset # sec # initialize xtime # initialize data_overlay ColumnDataSource # use multiline to plot data # initialize data_overlay ColumnDataSource # use multiline to plot data # collect controls and layout all elements #,button_dofit] # create tab ############# # tab 4 scroll through spike waveforms ############# creates a matrix with each column a different spike waveform rows are time/samples # filtert = int(0.01*fs_) # 500 msec include a pre-stimulus onset # sec #trial times list text input must not have spaces #create a new dictionary to store new data to plot temporarily #find peaks of y # create figure for tab 4 # create figure 2 for tab 4 # create text input for trial duration # create text input for trial times # create text input for spike threshold # create a button to export current waveform to concatenate with an h5 file each waveform exported must have same duration # button_exportTOh5 = Button(label="Export Waveform", button_type="success") # button_exportTOh5.on_click(button_exportwaveformtoh5file) # initialize data_spikes ColumnDataSource - spike datamat to plot from # initialize data_plot4 ColumnDataSource - spike waveform to plot # initialize line plot for spike waveform # initialize line plot for all spikes overlay # use multiline to plot data ADD show average rate # collect controls and layout all elements # create tab ####### # create tabs layout ######
1.929973
2
plotter/main.py
jmpargana/StackOverflowDataset
0
6617583
from plotter import Plotter import os def main(): plotter = Plotter( os.getenv("MONGO_URI") or "mongodb://mongo_app:27017/", os.getenv("MAX_TAGS") or 35, ) plotter.create_graph() if __name__ == "__main__": main()
from plotter import Plotter import os def main(): plotter = Plotter( os.getenv("MONGO_URI") or "mongodb://mongo_app:27017/", os.getenv("MAX_TAGS") or 35, ) plotter.create_graph() if __name__ == "__main__": main()
none
1
2.028018
2
poetry/apps/corpus/migrations/0004_auto_20170414_2304.py
IlyaGusev/PoetryCorpus
45
6617584
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('corpus', '0003_generationsettings_line'), ] operations = [ migrations.AlterModelOptions( name='poem', options={'verbose_name': 'Стихотворение', 'verbose_name_plural': 'Стихотворения', 'permissions': (('can_view_restricted_poems', 'Can view restricted poems'),)}, ), migrations.AddField( model_name='poem', name='is_restricted', field=models.BooleanField(verbose_name='Стихи с ограниченным доступом', default=False), ), ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('corpus', '0003_generationsettings_line'), ] operations = [ migrations.AlterModelOptions( name='poem', options={'verbose_name': 'Стихотворение', 'verbose_name_plural': 'Стихотворения', 'permissions': (('can_view_restricted_poems', 'Can view restricted poems'),)}, ), migrations.AddField( model_name='poem', name='is_restricted', field=models.BooleanField(verbose_name='Стихи с ограниченным доступом', default=False), ), ]
en
0.769321
# -*- coding: utf-8 -*-
1.496465
1
pyrecon/tests/test_utils.py
cosmodesi/pyrecon
2
6617585
import re import numpy as np from pyrecon import utils from pyrecon.utils import DistanceToRedshift def decode_eval_str(s): # change ${col} => col, and return list of columns toret = str(s) columns = [] for replace in re.finditer(r'(\${.*?})', s): value = replace.group(1) col = value[2:-1] toret = toret.replace(value, col) if col not in columns: columns.append(col) return toret, columns def test_decode_eval_str(): s = '(${RA}>0.) & (${RA}<30.) & (${DEC}>0.) & (${DEC}<30.)' s, cols = decode_eval_str(s) print(s, cols) def test_distance_to_redshift(): def distance(z): return z**2 d2z = DistanceToRedshift(distance) z = np.linspace(0., 20., 200) d = distance(z) assert np.allclose(d2z(d), z) for itemsize in [4, 8]: assert d2z(d.astype('f{:d}'.format(itemsize))).itemsize == itemsize def test_random(): positions = utils.random_box_positions(10., boxcenter=5., size=100, dtype='f4') assert positions.shape == (100, 3) assert positions.dtype.itemsize == 4 assert (positions.min() >= 0.) and (positions.max() <= 10.) positions = utils.random_box_positions(10., nbar=2) assert positions.shape[0] == 2000 assert (positions.min() >= -5.) and (positions.max() <= 5.) def test_cartesian_to_sky(): for dtype in ['f4', 'f8']: dtype = np.dtype(dtype) positions = utils.random_box_positions(10., boxcenter=15., size=100, dtype=dtype) drd = utils.cartesian_to_sky(positions) assert all(array.dtype.itemsize == dtype.itemsize for array in drd) positions2 = utils.sky_to_cartesian(*drd) assert positions2.dtype.itemsize == dtype.itemsize assert np.allclose(positions2, positions, rtol=1e-6 if dtype.itemsize == 4 else 1e-9) if __name__ == '__main__': test_decode_eval_str() test_distance_to_redshift() test_random() test_cartesian_to_sky()
import re import numpy as np from pyrecon import utils from pyrecon.utils import DistanceToRedshift def decode_eval_str(s): # change ${col} => col, and return list of columns toret = str(s) columns = [] for replace in re.finditer(r'(\${.*?})', s): value = replace.group(1) col = value[2:-1] toret = toret.replace(value, col) if col not in columns: columns.append(col) return toret, columns def test_decode_eval_str(): s = '(${RA}>0.) & (${RA}<30.) & (${DEC}>0.) & (${DEC}<30.)' s, cols = decode_eval_str(s) print(s, cols) def test_distance_to_redshift(): def distance(z): return z**2 d2z = DistanceToRedshift(distance) z = np.linspace(0., 20., 200) d = distance(z) assert np.allclose(d2z(d), z) for itemsize in [4, 8]: assert d2z(d.astype('f{:d}'.format(itemsize))).itemsize == itemsize def test_random(): positions = utils.random_box_positions(10., boxcenter=5., size=100, dtype='f4') assert positions.shape == (100, 3) assert positions.dtype.itemsize == 4 assert (positions.min() >= 0.) and (positions.max() <= 10.) positions = utils.random_box_positions(10., nbar=2) assert positions.shape[0] == 2000 assert (positions.min() >= -5.) and (positions.max() <= 5.) def test_cartesian_to_sky(): for dtype in ['f4', 'f8']: dtype = np.dtype(dtype) positions = utils.random_box_positions(10., boxcenter=15., size=100, dtype=dtype) drd = utils.cartesian_to_sky(positions) assert all(array.dtype.itemsize == dtype.itemsize for array in drd) positions2 = utils.sky_to_cartesian(*drd) assert positions2.dtype.itemsize == dtype.itemsize assert np.allclose(positions2, positions, rtol=1e-6 if dtype.itemsize == 4 else 1e-9) if __name__ == '__main__': test_decode_eval_str() test_distance_to_redshift() test_random() test_cartesian_to_sky()
en
0.591633
# change ${col} => col, and return list of columns
2.3957
2
question_3/heguilong.py
loongoo/GitDemo
0
6617586
<filename>question_3/heguilong.py """ File: heguilong.py Author: heguilong Email: <EMAIL> Github: https://github.com/hgleagle Description: 统计一个文件中每个单词出现的次数,列出出现频率最多的5个单词。 """ import logging import sys import re from collections import Counter logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s \ - %(message)s') class WordCount: def __init__(self, file_name): self.file_name = file_name def count_word(self, most_num): """print most counts words :most_num: print most counts words """ with open(self.file_name, 'r') as f: data = f.read().lower() # characters and single quote not split words = re.split(r'[^\w\']+', data) logging.debug(words) most_cnts_words = Counter(words).most_common(most_num) print(most_cnts_words) if __name__ == '__main__': if len(sys.argv) != 2: print('Usage: python3 heguilong.py file_name') sys.exit() word_count = WordCount(sys.argv[1]) word_count.count_word(5)
<filename>question_3/heguilong.py """ File: heguilong.py Author: heguilong Email: <EMAIL> Github: https://github.com/hgleagle Description: 统计一个文件中每个单词出现的次数,列出出现频率最多的5个单词。 """ import logging import sys import re from collections import Counter logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s \ - %(message)s') class WordCount: def __init__(self, file_name): self.file_name = file_name def count_word(self, most_num): """print most counts words :most_num: print most counts words """ with open(self.file_name, 'r') as f: data = f.read().lower() # characters and single quote not split words = re.split(r'[^\w\']+', data) logging.debug(words) most_cnts_words = Counter(words).most_common(most_num) print(most_cnts_words) if __name__ == '__main__': if len(sys.argv) != 2: print('Usage: python3 heguilong.py file_name') sys.exit() word_count = WordCount(sys.argv[1]) word_count.count_word(5)
en
0.563911
File: heguilong.py Author: heguilong Email: <EMAIL> Github: https://github.com/hgleagle Description: 统计一个文件中每个单词出现的次数,列出出现频率最多的5个单词。 print most counts words :most_num: print most counts words # characters and single quote not split
3.424554
3
guppe/atividades/secao_7/ex004.py
WesleyLucas97/cursos_python
0
6617587
<gh_stars>0 """ Faça um programa que leia um vetor de 8 posições e, em seguida, leia também dois valores X e Y quaisquer correspondentes a duas posições no vetor. Ao final seu programa deverá escrever a soma dos valores encontrados nas respectivas posições X e Y. """ lista = [] for x in range(1, 9): n1 = float(input(f'{x} - Digite: ')) lista.append(n1) x = int(input('Digite o valor de uma posição: ')) - 1 y = int(input('Digite o valor de uma posição: ')) - 1 print(f'O valor da posição {x+1} é {lista[x]}, e o da posição {y+1} é {lista[y]}')
""" Faça um programa que leia um vetor de 8 posições e, em seguida, leia também dois valores X e Y quaisquer correspondentes a duas posições no vetor. Ao final seu programa deverá escrever a soma dos valores encontrados nas respectivas posições X e Y. """ lista = [] for x in range(1, 9): n1 = float(input(f'{x} - Digite: ')) lista.append(n1) x = int(input('Digite o valor de uma posição: ')) - 1 y = int(input('Digite o valor de uma posição: ')) - 1 print(f'O valor da posição {x+1} é {lista[x]}, e o da posição {y+1} é {lista[y]}')
pt
0.995786
Faça um programa que leia um vetor de 8 posições e, em seguida, leia também dois valores X e Y quaisquer correspondentes a duas posições no vetor. Ao final seu programa deverá escrever a soma dos valores encontrados nas respectivas posições X e Y.
3.837613
4
synthdnm/clf.py
james-guevara/synthdnm
6
6617588
<gh_stars>1-10 __version__="0.1.0.1" __usage__=""" _______ __ __ __ _ _______ __ __ ______ __ _ __ __ | || | | || | | || || | | || | | | | || |_| | | _____|| |_| || |_| ||_ _|| |_| || _ || |_| || | | |_____ | || | | | | || | | || || | |_____ ||_ _|| _ | | | | || |_| || _ || | _____| | | | | | | | | | | _ || || | | || ||_|| | |_______| |___| |_| |__| |___| |__| |__||______| |_| |__||_| |_| Version {} Authors: <NAME>, <NAME>, <NAME> Contact: <EMAIL> --------------------------------------------------------------------------------- synthdnm-classify -f <in.fam> -d <in.features.txt> necessary arguments: -f, --fam PATH PLINK pedigree (.fam/.ped) file -d, --features PATH feature file optional arguments: -s, --snp_classifier PATH path to snp classifier joblib file -l, --indel_classifier PATH path to indel classifier joblib file -p, --keep_all_putative_dnms flag that retains all putative dnms (and their scores) in the output files -h, --help show this message and exit """.format(__version__) import pandas as pd # from sklearn.externals import joblib import joblib import os,sys import numpy as np def classify_dataframe(df = None, clf = None, ofh = None, mode = "a", keep_fp = False, features = None): pd.options.mode.chained_assignment = None df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna(axis=0,subset=df.columns[12:36]) # ClippingRankSum (temporary solution) df["ClippingRankSum"] = 0 if df.empty: # print("Empty dataframe.") return 0 X = df[features].to_numpy() df["pred"] = clf.predict(X) df["prob"] = clf.predict_proba(X)[:,1] if keep_fp == False: df = df.loc[df["pred"] == 1] with open(ofh, mode) as f: df.to_csv(f, sep="\t", header = False, index=False) def get_sex(fam_fh): fam = open(fam_fh, "r") fam_dict = {} for line in fam: linesplit = line.rstrip().split("\t") iid = linesplit[1] sex = linesplit[4] fam_dict[iid] = sex df = pd.Series(fam_dict).to_frame("sex") df["iid"] = df.index df.reset_index(inplace=True) df.drop(columns=["index"],inplace=True) return df def classify(feature_table=None,keep_fp=False,pseud=None,fam_fh=None,clf_snv="snp_100-12-10-2-1-0.0-100.joblib",clf_indel="indel_1000-12-25-2-1-0.0-100.joblib"): # Get classifiers clf = joblib.load(clf_snv) clf_indels = joblib.load(clf_indel) # Make dataframe from input pydnm file df = pd.read_csv(feature_table,sep="\t",dtype={"chrom": str}) # Get the list of features columns = list(df.columns) non_features = ['chrom', 'pos', 'ID', 'ref', 'alt', 'iid', 'offspring_gt', 'father_gt', 'mother_gt', 'nalt', 'filter', 'qual'] features = [elem for elem in columns if elem not in non_features] df_fam = get_sex(fam_fh) # pseud_chrX = pseud["chrX"] # pseud_chrX_interval_one = pseud_chrX[0] # pseud_chrX_interval_two = pseud_chrX[1] # pseud_chrY = pseud["chrY"] # pseud_chrY_interval_one = pseud_chrY[0] # pseud_chrY_interval_two = pseud_chrY[1] from pathlib import Path feature_filename = feature_table feature_file_stem = Path(feature_filename).stem feature_file_parent = str(Path(feature_filename).parent) + "/" feature_file_parent_stem = feature_file_parent + feature_file_stem ofh = feature_file_parent_stem + ".preds.txt" with open(feature_filename) as f: header_list = f.readline().rstrip().split("\t") header_list = header_list + ["sex","pred","prob"] df_out = pd.DataFrame(columns = header_list) df_out.to_csv(ofh, sep = "\t", index = False) df['iid']=df['iid'].astype(str) df_fam['iid']=df_fam['iid'].astype(str) df = pd.merge(df, df_fam, on="iid") df=df[~df["chrom"].str.contains("GL*")] df["chrom"]=df["chrom"].astype(str) df["chrom"] = df["chrom"].apply(lambda s: "chr" + s if not s.startswith("chr") else s) df_autosomal_SNV = df.loc[(df["chrom"] != "chrY") & (df["chrom"] != "chrX") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1)] df_autosomal_indel = df.loc[(df["chrom"] != "chrY") & (df["chrom"] != "chrX") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1))] # df_female_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "2") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1)] # df_female_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "2") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1))] # df_male_nonPAR_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') & (df["mother_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & ~(df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_nonPAR_Y_SNV = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1')&(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & ~(df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # df_male_nonPAR_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') & (df["mother_gt"]=='0/0') & ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & ~(df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_nonPAR_Y_indel = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & ~(df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # df_male_PAR_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & (df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_PAR_Y_SNV = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") &(df["offspring_gt"]=='0/1') &(df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & (df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # df_male_PAR_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & (df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_PAR_Y_indel = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") &(df["offspring_gt"]=='0/1') &(df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & (df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] classify_dataframe(df = df_autosomal_SNV, clf = clf, ofh = ofh, features = features, keep_fp = keep_fp) classify_dataframe(df = df_autosomal_indel,clf = clf_indels, ofh = ofh, features = features, keep_fp = keep_fp) # classify_dataframe(df_female_X_SNV,clf,ofh_new) # classify_dataframe(df_female_X_indel,clf_indels,ofh_new) # classify_dataframe(df_male_nonPAR_X_SNV,clf_chrX_snps,ofh_new) # classify_dataframe(df_male_nonPAR_Y_SNV,clf_chrY_snps,ofh_new) # classify_dataframe(df_male_nonPAR_X_indel,clf_chrX_chrY_indels,ofh_new) # classify_dataframe(df_male_nonPAR_Y_indel,clf_chrX_chrY_indels,ofh_new) # classify_dataframe(df_male_PAR_X_SNV,clf,ofh_new) # classify_dataframe(df_male_PAR_Y_SNV,clf,ofh_new) # classify_dataframe(df_male_PAR_X_indel,clf_indels,ofh_new) # classify_dataframe(df_male_PAR_Y_indel,clf_indels,ofh_new) ofb = feature_file_parent_stem + ".preds.bed" fout = open(ofb,"w") f = open(ofh,"r") make_output_bed(f = f, fout = fout) def make_output_bed(f = None, fout = None): f.readline() for line in f: linesplit = line.rstrip().split("\t") chrom,pos,ref,alt,iid,pred,prob = linesplit[0],linesplit[1],linesplit[3],linesplit[4],linesplit[5],linesplit[-2],linesplit[-1] pos_0 = str(int(pos)-1) pos_1= str(int(pos) + len(ref) - 1) ID_column = "{}:{}:{}:{}:{}:{}:{}".format(chrom,pos,ref,alt,iid,pred,prob) row = "{}\t{}\t{}\t{}\n".format(chrom,pos_0,pos_1,ID_column) fout.write(row) if __name__ == "__main__": import warnings warnings.filterwarnings("ignore") import argparse parser = argparse.ArgumentParser(usage=__usage__) # Necessary arguments parser.add_argument("-d","--features",required=True) parser.add_argument("-f","--fam",required=True) # Optional arguments parser.add_argument("-s","--snp_classifier",required=False) parser.add_argument("-i","--indel_classifier",required=False) parser.add_argument('-p',"--keep_all_putative_dnms", action='store_true') args = parser.parse_args() # feature_filename = sys.argv[1] feature_filename = args.features # ped_filename = sys.argv[2] ped_filename = args.fam if args.snp_classifier: snv_clf_filename = args.s else: snv_clf_filename = "snp_100-12-10-2-1-0.0-100.joblib" if args.indel_classifier: indel_clf_filename = args.i else: indel_clf_filename = "indel_1000-12-25-2-1-0.0-100.joblib" keep_fp = args.keep_all_putative_dnms classify(feature_table = feature_filename, fam_fh = ped_filename, clf_snv = snv_clf_filename, clf_indel = indel_clf_filename, keep_fp = keep_fp)
__version__="0.1.0.1" __usage__=""" _______ __ __ __ _ _______ __ __ ______ __ _ __ __ | || | | || | | || || | | || | | | | || |_| | | _____|| |_| || |_| ||_ _|| |_| || _ || |_| || | | |_____ | || | | | | || | | || || | |_____ ||_ _|| _ | | | | || |_| || _ || | _____| | | | | | | | | | | _ || || | | || ||_|| | |_______| |___| |_| |__| |___| |__| |__||______| |_| |__||_| |_| Version {} Authors: <NAME>, <NAME>, <NAME> Contact: <EMAIL> --------------------------------------------------------------------------------- synthdnm-classify -f <in.fam> -d <in.features.txt> necessary arguments: -f, --fam PATH PLINK pedigree (.fam/.ped) file -d, --features PATH feature file optional arguments: -s, --snp_classifier PATH path to snp classifier joblib file -l, --indel_classifier PATH path to indel classifier joblib file -p, --keep_all_putative_dnms flag that retains all putative dnms (and their scores) in the output files -h, --help show this message and exit """.format(__version__) import pandas as pd # from sklearn.externals import joblib import joblib import os,sys import numpy as np def classify_dataframe(df = None, clf = None, ofh = None, mode = "a", keep_fp = False, features = None): pd.options.mode.chained_assignment = None df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna(axis=0,subset=df.columns[12:36]) # ClippingRankSum (temporary solution) df["ClippingRankSum"] = 0 if df.empty: # print("Empty dataframe.") return 0 X = df[features].to_numpy() df["pred"] = clf.predict(X) df["prob"] = clf.predict_proba(X)[:,1] if keep_fp == False: df = df.loc[df["pred"] == 1] with open(ofh, mode) as f: df.to_csv(f, sep="\t", header = False, index=False) def get_sex(fam_fh): fam = open(fam_fh, "r") fam_dict = {} for line in fam: linesplit = line.rstrip().split("\t") iid = linesplit[1] sex = linesplit[4] fam_dict[iid] = sex df = pd.Series(fam_dict).to_frame("sex") df["iid"] = df.index df.reset_index(inplace=True) df.drop(columns=["index"],inplace=True) return df def classify(feature_table=None,keep_fp=False,pseud=None,fam_fh=None,clf_snv="snp_100-12-10-2-1-0.0-100.joblib",clf_indel="indel_1000-12-25-2-1-0.0-100.joblib"): # Get classifiers clf = joblib.load(clf_snv) clf_indels = joblib.load(clf_indel) # Make dataframe from input pydnm file df = pd.read_csv(feature_table,sep="\t",dtype={"chrom": str}) # Get the list of features columns = list(df.columns) non_features = ['chrom', 'pos', 'ID', 'ref', 'alt', 'iid', 'offspring_gt', 'father_gt', 'mother_gt', 'nalt', 'filter', 'qual'] features = [elem for elem in columns if elem not in non_features] df_fam = get_sex(fam_fh) # pseud_chrX = pseud["chrX"] # pseud_chrX_interval_one = pseud_chrX[0] # pseud_chrX_interval_two = pseud_chrX[1] # pseud_chrY = pseud["chrY"] # pseud_chrY_interval_one = pseud_chrY[0] # pseud_chrY_interval_two = pseud_chrY[1] from pathlib import Path feature_filename = feature_table feature_file_stem = Path(feature_filename).stem feature_file_parent = str(Path(feature_filename).parent) + "/" feature_file_parent_stem = feature_file_parent + feature_file_stem ofh = feature_file_parent_stem + ".preds.txt" with open(feature_filename) as f: header_list = f.readline().rstrip().split("\t") header_list = header_list + ["sex","pred","prob"] df_out = pd.DataFrame(columns = header_list) df_out.to_csv(ofh, sep = "\t", index = False) df['iid']=df['iid'].astype(str) df_fam['iid']=df_fam['iid'].astype(str) df = pd.merge(df, df_fam, on="iid") df=df[~df["chrom"].str.contains("GL*")] df["chrom"]=df["chrom"].astype(str) df["chrom"] = df["chrom"].apply(lambda s: "chr" + s if not s.startswith("chr") else s) df_autosomal_SNV = df.loc[(df["chrom"] != "chrY") & (df["chrom"] != "chrX") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1)] df_autosomal_indel = df.loc[(df["chrom"] != "chrY") & (df["chrom"] != "chrX") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1))] # df_female_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "2") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1)] # df_female_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "2") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1))] # df_male_nonPAR_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') & (df["mother_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & ~(df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_nonPAR_Y_SNV = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1')&(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & ~(df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # df_male_nonPAR_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') & (df["mother_gt"]=='0/0') & ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & ~(df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_nonPAR_Y_indel = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & ~(df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # df_male_PAR_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & (df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_PAR_Y_SNV = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") &(df["offspring_gt"]=='0/1') &(df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & (df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # df_male_PAR_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & (df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_PAR_Y_indel = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") &(df["offspring_gt"]=='0/1') &(df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & (df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] classify_dataframe(df = df_autosomal_SNV, clf = clf, ofh = ofh, features = features, keep_fp = keep_fp) classify_dataframe(df = df_autosomal_indel,clf = clf_indels, ofh = ofh, features = features, keep_fp = keep_fp) # classify_dataframe(df_female_X_SNV,clf,ofh_new) # classify_dataframe(df_female_X_indel,clf_indels,ofh_new) # classify_dataframe(df_male_nonPAR_X_SNV,clf_chrX_snps,ofh_new) # classify_dataframe(df_male_nonPAR_Y_SNV,clf_chrY_snps,ofh_new) # classify_dataframe(df_male_nonPAR_X_indel,clf_chrX_chrY_indels,ofh_new) # classify_dataframe(df_male_nonPAR_Y_indel,clf_chrX_chrY_indels,ofh_new) # classify_dataframe(df_male_PAR_X_SNV,clf,ofh_new) # classify_dataframe(df_male_PAR_Y_SNV,clf,ofh_new) # classify_dataframe(df_male_PAR_X_indel,clf_indels,ofh_new) # classify_dataframe(df_male_PAR_Y_indel,clf_indels,ofh_new) ofb = feature_file_parent_stem + ".preds.bed" fout = open(ofb,"w") f = open(ofh,"r") make_output_bed(f = f, fout = fout) def make_output_bed(f = None, fout = None): f.readline() for line in f: linesplit = line.rstrip().split("\t") chrom,pos,ref,alt,iid,pred,prob = linesplit[0],linesplit[1],linesplit[3],linesplit[4],linesplit[5],linesplit[-2],linesplit[-1] pos_0 = str(int(pos)-1) pos_1= str(int(pos) + len(ref) - 1) ID_column = "{}:{}:{}:{}:{}:{}:{}".format(chrom,pos,ref,alt,iid,pred,prob) row = "{}\t{}\t{}\t{}\n".format(chrom,pos_0,pos_1,ID_column) fout.write(row) if __name__ == "__main__": import warnings warnings.filterwarnings("ignore") import argparse parser = argparse.ArgumentParser(usage=__usage__) # Necessary arguments parser.add_argument("-d","--features",required=True) parser.add_argument("-f","--fam",required=True) # Optional arguments parser.add_argument("-s","--snp_classifier",required=False) parser.add_argument("-i","--indel_classifier",required=False) parser.add_argument('-p',"--keep_all_putative_dnms", action='store_true') args = parser.parse_args() # feature_filename = sys.argv[1] feature_filename = args.features # ped_filename = sys.argv[2] ped_filename = args.fam if args.snp_classifier: snv_clf_filename = args.s else: snv_clf_filename = "snp_100-12-10-2-1-0.0-100.joblib" if args.indel_classifier: indel_clf_filename = args.i else: indel_clf_filename = "indel_1000-12-25-2-1-0.0-100.joblib" keep_fp = args.keep_all_putative_dnms classify(feature_table = feature_filename, fam_fh = ped_filename, clf_snv = snv_clf_filename, clf_indel = indel_clf_filename, keep_fp = keep_fp)
en
0.213417
_______ __ __ __ _ _______ __ __ ______ __ _ __ __ | || | | || | | || || | | || | | | | || |_| | | _____|| |_| || |_| ||_ _|| |_| || _ || |_| || | | |_____ | || | | | | || | | || || | |_____ ||_ _|| _ | | | | || |_| || _ || | _____| | | | | | | | | | | _ || || | | || ||_|| | |_______| |___| |_| |__| |___| |__| |__||______| |_| |__||_| |_| Version {} Authors: <NAME>, <NAME>, <NAME> Contact: <EMAIL> --------------------------------------------------------------------------------- synthdnm-classify -f <in.fam> -d <in.features.txt> necessary arguments: -f, --fam PATH PLINK pedigree (.fam/.ped) file -d, --features PATH feature file optional arguments: -s, --snp_classifier PATH path to snp classifier joblib file -l, --indel_classifier PATH path to indel classifier joblib file -p, --keep_all_putative_dnms flag that retains all putative dnms (and their scores) in the output files -h, --help show this message and exit # from sklearn.externals import joblib # ClippingRankSum (temporary solution) # print("Empty dataframe.") # Get classifiers # Make dataframe from input pydnm file # Get the list of features # pseud_chrX = pseud["chrX"] # pseud_chrX_interval_one = pseud_chrX[0] # pseud_chrX_interval_two = pseud_chrX[1] # pseud_chrY = pseud["chrY"] # pseud_chrY_interval_one = pseud_chrY[0] # pseud_chrY_interval_two = pseud_chrY[1] # df_female_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "2") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1)] # df_female_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "2") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1))] # df_male_nonPAR_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') & (df["mother_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & ~(df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_nonPAR_Y_SNV = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1')&(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & ~(df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # df_male_nonPAR_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') & (df["mother_gt"]=='0/0') & ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & ~(df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_nonPAR_Y_indel = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") & (df["offspring_gt"]=='1/1') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & ~(df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # df_male_PAR_X_SNV = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & (df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_PAR_Y_SNV = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") &(df["offspring_gt"]=='0/1') &(df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1) & (df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # df_male_PAR_X_indel = df.loc[(df["chrom"] == "chrX") & (df["sex"] == "1") & (df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & (df["pos"].between(pseud_chrX_interval_one[0],pseud_chrX_interval_one[1]) | df["pos"].between(pseud_chrX_interval_two[0], pseud_chrX_interval_two[1]))] # df_male_PAR_Y_indel = df.loc[(df["chrom"] == "chrY") & (df["sex"] == "1") &(df["offspring_gt"]=='0/1') &(df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1)) & (df["pos"].between(pseud_chrY_interval_one[0],pseud_chrY_interval_one[1]) | df["pos"].between(pseud_chrY_interval_two[0], pseud_chrY_interval_two[1]))] # classify_dataframe(df_female_X_SNV,clf,ofh_new) # classify_dataframe(df_female_X_indel,clf_indels,ofh_new) # classify_dataframe(df_male_nonPAR_X_SNV,clf_chrX_snps,ofh_new) # classify_dataframe(df_male_nonPAR_Y_SNV,clf_chrY_snps,ofh_new) # classify_dataframe(df_male_nonPAR_X_indel,clf_chrX_chrY_indels,ofh_new) # classify_dataframe(df_male_nonPAR_Y_indel,clf_chrX_chrY_indels,ofh_new) # classify_dataframe(df_male_PAR_X_SNV,clf,ofh_new) # classify_dataframe(df_male_PAR_Y_SNV,clf,ofh_new) # classify_dataframe(df_male_PAR_X_indel,clf_indels,ofh_new) # classify_dataframe(df_male_PAR_Y_indel,clf_indels,ofh_new) # Necessary arguments # Optional arguments # feature_filename = sys.argv[1] # ped_filename = sys.argv[2]
1.67806
2
examples/deep_predictive_maintenance/score/score.py
NunoEdgarGFlowHub/MLOps
1,068
6617589
import os import json import numpy as np import torch from azureml.core.model import Model def init(): global model model_path = Model.get_model_path('deep_pdm') print("model loaded:",model_path) model = torch.load(model_path, map_location=torch.device('cpu')) model.eval() def run(input_data): x_input = torch.tensor(json.loads(input_data)['input_data']) score,proba = model(x_input) score = score.data.numpy().argmax(axis=1).item() proba = proba.view(-1)[0].item() return {'prediction':int(score), 'likelihood':float(proba)}
import os import json import numpy as np import torch from azureml.core.model import Model def init(): global model model_path = Model.get_model_path('deep_pdm') print("model loaded:",model_path) model = torch.load(model_path, map_location=torch.device('cpu')) model.eval() def run(input_data): x_input = torch.tensor(json.loads(input_data)['input_data']) score,proba = model(x_input) score = score.data.numpy().argmax(axis=1).item() proba = proba.view(-1)[0].item() return {'prediction':int(score), 'likelihood':float(proba)}
none
1
2.6304
3
configs/common/SSConfig.py
shinezyy/gem5
10
6617590
import m5 from m5.objects import * def modifyO3CPUConfig(options, cpu): print('modifying O3 cpu config') if options.num_ROB: cpu.numROBEntries = options.num_ROB if options.num_IQ: cpu.numIQEntries = options.num_IQ if options.num_LQ: cpu.LQEntries = options.num_LQ if options.num_SQ: cpu.SQEntries = options.num_SQ if options.num_PhysReg: cpu.numPhysIntRegs = options.num_PhysReg cpu.numPhysFloatRegs = options.num_PhysReg cpu.numPhysVecRegs = options.num_PhysReg cpu.numPhysCCRegs = 0 cpu.branchPred = LTAGE()
import m5 from m5.objects import * def modifyO3CPUConfig(options, cpu): print('modifying O3 cpu config') if options.num_ROB: cpu.numROBEntries = options.num_ROB if options.num_IQ: cpu.numIQEntries = options.num_IQ if options.num_LQ: cpu.LQEntries = options.num_LQ if options.num_SQ: cpu.SQEntries = options.num_SQ if options.num_PhysReg: cpu.numPhysIntRegs = options.num_PhysReg cpu.numPhysFloatRegs = options.num_PhysReg cpu.numPhysVecRegs = options.num_PhysReg cpu.numPhysCCRegs = 0 cpu.branchPred = LTAGE()
none
1
2.445717
2
web.py
LouDnl/ESP32-3DprinterEnclosureTool
0
6617591
from time import sleep import gc try: # import the python socket api import usocket as socket except: import socket #import machine # machine settings from machine import reset def web_page(): html = """ <!DOCTYPE html> <html lang="en" dir="ltr"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta http-equiv="refresh" content="2.5; URL='/'"> <link rel="icon" href="data:,"> <title>ESP32 Micropython Web Server</title> <style> html{ font-family: Helvetica; display:inline-block; margin: 0px auto; text-align: center;} h1{ color: #344feb; padding: 2vh;} p{ font-size: 1.5rem;} .button{ display: inline-block; background-color: #eb7134; border: none; border-radius: 4px; color: white; p adding: 16px 40px; /*text-decoration: none; */ font-size: 30px; margin: 2px; cursor: pointer;} .button2{ background-color: #349ceb;} </style> </head> <body> <h1>Ender 3 v2 Enclosure</h1> <h2>Sensors:</h2> <p>Enclosure Temperature: <span class="button">""" + temp + """ &#8451;</span></p> <p>Enclosure Humidity: <span class="button button2">""" + humi + """ %rH</span></p> <p>Ender 3 v2 CPU Temperature: <span class="button">""" + therm + """ &#8451;</span></p> </body> </html>""" return html try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 80)) s.listen(5) except (Exception, AssertionError) as exc: print("Address in use, restarting", exc.args[0]) sleep(2) reset() pass while True: if gc.mem_free() < 54000: gc.collect() try: sleep(1) from lcd import myList if myList[0] != 0: temp = myList[0] else: temp = 10 if myList[1] != 0: humi = myList[1] else: humi = 10 if myList[2] != 0: therm = myList[2] else: therm = 10 except (Exception, AssertionError) as exc: print("Couldn't get information from sensors for web ", exc.args[0]) temp, humi, therm = 10, 10, 10 pass try: conn, addr = s.accept() except: print("Socket Accept Error ", exc.args[0]) # reset() pass print('Got a connection from %s' % str(addr)) try: request = conn.recv(1024) except (Exception, AssertionError) as exc: print("recv -------------", exc.args[0]) # reset() pass try: response = web_page() conn.send('HTTP/1.1 200 OK\n') conn.send('Content-Type: text/html\n') conn.send('Connection: close\n\n') conn.sendall(response) except (Exception, AssertionError) as exc: print("Connection problem", exc.args[0]) # reset() pass conn.close()
from time import sleep import gc try: # import the python socket api import usocket as socket except: import socket #import machine # machine settings from machine import reset def web_page(): html = """ <!DOCTYPE html> <html lang="en" dir="ltr"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta http-equiv="refresh" content="2.5; URL='/'"> <link rel="icon" href="data:,"> <title>ESP32 Micropython Web Server</title> <style> html{ font-family: Helvetica; display:inline-block; margin: 0px auto; text-align: center;} h1{ color: #344feb; padding: 2vh;} p{ font-size: 1.5rem;} .button{ display: inline-block; background-color: #eb7134; border: none; border-radius: 4px; color: white; p adding: 16px 40px; /*text-decoration: none; */ font-size: 30px; margin: 2px; cursor: pointer;} .button2{ background-color: #349ceb;} </style> </head> <body> <h1>Ender 3 v2 Enclosure</h1> <h2>Sensors:</h2> <p>Enclosure Temperature: <span class="button">""" + temp + """ &#8451;</span></p> <p>Enclosure Humidity: <span class="button button2">""" + humi + """ %rH</span></p> <p>Ender 3 v2 CPU Temperature: <span class="button">""" + therm + """ &#8451;</span></p> </body> </html>""" return html try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 80)) s.listen(5) except (Exception, AssertionError) as exc: print("Address in use, restarting", exc.args[0]) sleep(2) reset() pass while True: if gc.mem_free() < 54000: gc.collect() try: sleep(1) from lcd import myList if myList[0] != 0: temp = myList[0] else: temp = 10 if myList[1] != 0: humi = myList[1] else: humi = 10 if myList[2] != 0: therm = myList[2] else: therm = 10 except (Exception, AssertionError) as exc: print("Couldn't get information from sensors for web ", exc.args[0]) temp, humi, therm = 10, 10, 10 pass try: conn, addr = s.accept() except: print("Socket Accept Error ", exc.args[0]) # reset() pass print('Got a connection from %s' % str(addr)) try: request = conn.recv(1024) except (Exception, AssertionError) as exc: print("recv -------------", exc.args[0]) # reset() pass try: response = web_page() conn.send('HTTP/1.1 200 OK\n') conn.send('Content-Type: text/html\n') conn.send('Connection: close\n\n') conn.sendall(response) except (Exception, AssertionError) as exc: print("Connection problem", exc.args[0]) # reset() pass conn.close()
en
0.277539
# import the python socket api #import machine # machine settings <!DOCTYPE html> <html lang="en" dir="ltr"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta http-equiv="refresh" content="2.5; URL='/'"> <link rel="icon" href="data:,"> <title>ESP32 Micropython Web Server</title> <style> html{ font-family: Helvetica; display:inline-block; margin: 0px auto; text-align: center;} h1{ color: #344feb; padding: 2vh;} p{ font-size: 1.5rem;} .button{ display: inline-block; background-color: #eb7134; border: none; border-radius: 4px; color: white; p adding: 16px 40px; /*text-decoration: none; */ font-size: 30px; margin: 2px; cursor: pointer;} .button2{ background-color: #349ceb;} </style> </head> <body> <h1>Ender 3 v2 Enclosure</h1> <h2>Sensors:</h2> <p>Enclosure Temperature: <span class="button"> &#8451;</span></p> <p>Enclosure Humidity: <span class="button button2"> %rH</span></p> <p>Ender 3 v2 CPU Temperature: <span class="button"> &#8451;</span></p> </body> </html> # reset() # reset() # reset()
2.560722
3
src/smif/data_layer/store.py
nismod/smif
28
6617592
<reponame>nismod/smif """The store provides a common data interface to smif configuration, data and metadata. Raises ------ SmifDataNotFoundError If data cannot be found in the store when try to read from the store SmifDataExistsError If data already exists in the store when trying to write to the store (use an update method instead) SmifDataMismatchError Data presented to read, write and update methods is in the incorrect format or of wrong dimensions to that expected SmifDataReadError When unable to read data e.g. unable to handle file type or connect to database """ import itertools import logging import os from collections import OrderedDict, defaultdict from copy import deepcopy from operator import itemgetter from os.path import splitext from typing import Dict, List, Optional, Union import numpy as np # type: ignore from smif.data_layer import DataArray from smif.data_layer.abstract_data_store import DataStore from smif.data_layer.abstract_metadata_store import MetadataStore from smif.data_layer.file import (CSVDataStore, FileMetadataStore, ParquetDataStore, YamlConfigStore) from smif.data_layer.validate import (validate_sos_model_config, validate_sos_model_format) from smif.exception import SmifDataError, SmifDataNotFoundError from smif.metadata.spec import Spec class Store(): """Common interface to data store, composed of config, metadata and data store implementations. Parameters ---------- config_store: ~smif.data_layer.abstract_config_store.ConfigStore metadata_store: ~smif.data_layer.abstract_metadata_store.MetadataStore data_store: ~smif.data_layer.abstract_data_store.DataStore """ def __init__(self, config_store, metadata_store: MetadataStore, data_store: DataStore, model_base_folder="."): self.logger = logging.getLogger(__name__) self.config_store = config_store self.metadata_store = metadata_store self.data_store = data_store # base folder for any relative paths to models self.model_base_folder = str(model_base_folder) @classmethod def from_dict(cls, config): """Create Store from configuration dict """ try: interface = config['interface'] except KeyError: logging.warning('No interface provided for Results(). Assuming local_csv') interface = 'local_csv' try: directory = config['dir'] except KeyError: logging.warning("No directory provided for Results(). Assuming '.'") directory = '.' # Check that the directory is valid if not os.path.isdir(directory): raise ValueError('Expected {} to be a valid directory'.format(directory)) if interface == 'local_csv': data_store = CSVDataStore(directory) elif interface == 'local_parquet': data_store = ParquetDataStore(directory) else: raise ValueError( 'Unsupported interface "{}". Supply local_csv or local_parquet'.format( interface)) return cls( config_store=YamlConfigStore(directory), metadata_store=FileMetadataStore(directory), data_store=data_store, model_base_folder=directory ) # # CONFIG # # region Model runs def read_model_runs(self): """Read all system-of-system model runs Returns ------- list[~smif.controller.modelrun.ModelRun] """ return sorted(self.config_store.read_model_runs(), key=itemgetter('name')) def read_model_run(self, model_run_name): """Read a system-of-system model run Parameters ---------- model_run_name : str Returns ------- ~smif.controller.modelrun.ModelRun """ return self.config_store.read_model_run(model_run_name) def write_model_run(self, model_run): """Write system-of-system model run Parameters ---------- model_run : ~smif.controller.modelrun.ModelRun """ self.config_store.write_model_run(model_run) def update_model_run(self, model_run_name, model_run): """Update system-of-system model run Parameters ---------- model_run_name : str model_run : ~smif.controller.modelrun.ModelRun """ self.config_store.update_model_run(model_run_name, model_run) def delete_model_run(self, model_run_name): """Delete a system-of-system model run Parameters ---------- model_run_name : str """ self.config_store.delete_model_run(model_run_name) # endregion # region System-of-systems models def read_sos_models(self): """Read all system-of-system models Returns ------- list[~smif.model.sos_model.SosModel] """ return sorted(self.config_store.read_sos_models(), key=itemgetter('name')) def read_sos_model(self, sos_model_name): """Read a specific system-of-system model Parameters ---------- sos_model_name : str Returns ------- ~smif.model.sos_model.SosModel """ return self.config_store.read_sos_model(sos_model_name) def write_sos_model(self, sos_model): """Write system-of-system model Parameters ---------- sos_model : ~smif.model.sos_model.SosModel """ validate_sos_model_format(sos_model) self.config_store.write_sos_model(sos_model) def update_sos_model(self, sos_model_name, sos_model): """Update system-of-system model Parameters ---------- sos_model_name : str sos_model : ~smif.model.sos_model.SosModel """ models = self.config_store.read_models() scenarios = self.config_store.read_scenarios() validate_sos_model_config(sos_model, models, scenarios) self.config_store.update_sos_model(sos_model_name, sos_model) def delete_sos_model(self, sos_model_name): """Delete a system-of-system model Parameters ---------- sos_model_name : str """ self.config_store.delete_sos_model(sos_model_name) # endregion # region Models def read_models(self, skip_coords=False): """Read all models Returns ------- list[~smif.model.model.Model] """ models = sorted(self.config_store.read_models(), key=itemgetter('name')) if not skip_coords: models = [ self._add_coords(model, ('inputs', 'outputs', 'parameters')) for model in models ] return models def read_model(self, model_name, skip_coords=False): """Read a model Parameters ---------- model_name : str Returns ------- ~smif.model.model.Model """ model = self.config_store.read_model(model_name) if not skip_coords: model = self._add_coords(model, ('inputs', 'outputs', 'parameters')) return model def write_model(self, model): """Write a model Parameters ---------- model : ~smif.model.model.Model """ self.config_store.write_model(model) def update_model(self, model_name, model): """Update a model Parameters ---------- model_name : str model : ~smif.model.model.Model """ self.config_store.update_model(model_name, model) def delete_model(self, model_name): """Delete a model Parameters ---------- model_name : str """ self.config_store.delete_model(model_name) # endregion # region Scenarios def read_scenarios(self, skip_coords=False): """Read scenarios Returns ------- list[~smif.model.ScenarioModel] """ scenarios = sorted(self.config_store.read_scenarios(), key=itemgetter('name')) if not skip_coords: scenarios = [ self._add_coords(scenario, ['provides']) for scenario in scenarios ] return scenarios def read_scenario(self, scenario_name, skip_coords=False): """Read a scenario Parameters ---------- scenario_name : str Returns ------- ~smif.model.ScenarioModel """ scenario = self.config_store.read_scenario(scenario_name) if not skip_coords: scenario = self._add_coords(scenario, ['provides']) return scenario def write_scenario(self, scenario): """Write scenario Parameters ---------- scenario : ~smif.model.ScenarioModel """ self.config_store.write_scenario(scenario) def update_scenario(self, scenario_name, scenario): """Update scenario Parameters ---------- scenario_name : str scenario : ~smif.model.ScenarioModel """ self.config_store.update_scenario(scenario_name, scenario) def delete_scenario(self, scenario_name): """Delete scenario from project configuration Parameters ---------- scenario_name : str """ self.config_store.delete_scenario(scenario_name) def prepare_scenario(self, scenario_name, list_of_variants): """ Modify {scenario_name} config file to include multiple scenario variants. Parameters ---------- scenario_name: str list_of_variants: list[int] - indices of scenario variants """ scenario = self.read_scenario(scenario_name) # Check that template scenario file does not define more than one variant if not scenario['variants'] or len(scenario['variants']) > 1: raise SmifDataError("Template scenario file must define one" " unique template variant.") # Read variant defined in template scenario file variant_template_name = scenario['variants'][0]['name'] base_variant = self.read_scenario_variant(scenario_name, variant_template_name) self.delete_scenario_variant(scenario_name, variant_template_name) # Read template names of scenario variant data files output_filenames = {} # output_name => (base, ext) # root is a dict. keyed on scenario outputs. # Entries contain the root of the variants filenames for output in scenario['provides']: output_name = output['name'] base, ext = splitext(base_variant['data'][output_name]) output_filenames[output_name] = base, ext # Now modify scenario file for variant_number in list_of_variants: # Copying the variant dict is required when underlying config_store # is an instance of MemoryConfigStore, which attribute _scenarios holds # a reference to the variant object passed to update or # write_scenario_variant variant = deepcopy(base_variant) variant['name'] = '{}_{:03d}'.format(scenario_name, variant_number) variant['description'] = '{} variant number {:03d}'.format( scenario_name, variant_number) for output_name, (base, ext) in output_filenames.items(): variant['data'][output_name] = '{}{:03d}{}'.format(base, variant_number, ext) self.write_scenario_variant(scenario_name, variant) def prepare_model_runs(self, model_run_name, scenario_name, first_var, last_var): """Write multiple model run config files corresponding to multiple scenario variants of {scenario_name}, based on template {model_run_name} Write batchfile containing each of the generated model runs Parameters ---------- model_run_name: str scenario_name: str first_var: int - between 0 and number of variants-1 last_var: int - between first_var and number of variants-1 """ model_run = self.read_model_run(model_run_name) scenario = self.read_scenario(scenario_name) # read strategies from config store (Store.read_strategies pulls together data on # interventions as well, which we don't need here) config_strategies = self.config_store.read_strategies(model_run_name) # Open batchfile f_handle = open(model_run_name + '.batch', 'w') # For each variant model_run, write a new model run file with corresponding # scenario variant and update batchfile for variant in scenario['variants'][first_var:last_var + 1]: variant_model_run_name = model_run_name + '_' + variant['name'] model_run_copy = deepcopy(model_run) model_run_copy['name'] = variant_model_run_name model_run_copy['scenarios'][scenario_name] = variant['name'] self.write_model_run(model_run_copy) self.config_store.write_strategies(variant_model_run_name, config_strategies) f_handle.write(model_run_name + '_' + variant['name'] + '\n') # Close batchfile f_handle.close() # endregion # region Scenario Variants def read_scenario_variants(self, scenario_name): """Read variants of a given scenario Parameters ---------- scenario_name : str Returns ------- list[dict] """ scenario_variants = self.config_store.read_scenario_variants(scenario_name) return sorted(scenario_variants, key=itemgetter('name')) def read_scenario_variant(self, scenario_name, variant_name): """Read a scenario variant Parameters ---------- scenario_name : str variant_name : str Returns ------- dict """ return self.config_store.read_scenario_variant(scenario_name, variant_name) def write_scenario_variant(self, scenario_name, variant): """Write scenario to project configuration Parameters ---------- scenario_name : str variant : dict """ self.config_store.write_scenario_variant(scenario_name, variant) def update_scenario_variant(self, scenario_name, variant_name, variant): """Update scenario to project configuration Parameters ---------- scenario_name : str variant_name : str variant : dict """ self.config_store.update_scenario_variant(scenario_name, variant_name, variant) def delete_scenario_variant(self, scenario_name, variant_name): """Delete scenario from project configuration Parameters ---------- scenario_name : str variant_name : str """ self.config_store.delete_scenario_variant(scenario_name, variant_name) # endregion # region Narratives def read_narrative(self, sos_model_name, narrative_name): """Read narrative from sos_model Parameters ---------- sos_model_name : str narrative_name : str """ return self.config_store.read_narrative(sos_model_name, narrative_name) # endregion # region Strategies def read_strategies(self, model_run_name): """Read strategies for a given model run Parameters ---------- model_run_name : str Returns ------- list[dict] """ strategies = deepcopy(self.config_store.read_strategies(model_run_name)) for strategy in strategies: if strategy['type'] == 'pre-specified-planning': strategy['interventions'] = self.data_store.read_strategy_interventions( strategy) return strategies def write_strategies(self, model_run_name, strategies): """Write strategies for a given model_run Parameters ---------- model_run_name : str strategies : list[dict] """ self.config_store.write_strategies(model_run_name, strategies) def convert_strategies_data(self, model_run_name, tgt_store, noclobber=False): strategies = self.read_strategies(model_run_name) for strategy in strategies: if strategy['type'] == 'pre-specified-planning': data_exists = tgt_store.read_strategy_interventions( strategy, assert_exists=True) if not(noclobber and data_exists): data = self.read_strategy_interventions(strategy) tgt_store.write_strategy_interventions(strategy, data) # endregion # # METADATA # # region Units def read_unit_definitions(self) -> List[str]: """Reads custom unit definitions Returns ------- list[str] Pint-compatible unit definitions """ return self.metadata_store.read_unit_definitions() def write_unit_definitions(self, definitions): """Reads custom unit definitions Parameters ---------- list[str] Pint-compatible unit definitions """ self.metadata_store.write_unit_definitions(definitions) # endregion # region Dimensions def read_dimensions(self, skip_coords=False): """Read dimensions Returns ------- list[~smif.metadata.coords.Coords] """ return self.metadata_store.read_dimensions(skip_coords) def read_dimension(self, dimension_name, skip_coords=False): """Return dimension Parameters ---------- dimension_name : str Returns ------- ~smif.metadata.coords.Coords A dimension definition (including elements) """ return self.metadata_store.read_dimension(dimension_name, skip_coords) def write_dimension(self, dimension): """Write dimension to project configuration Parameters ---------- dimension : ~smif.metadata.coords.Coords """ self.metadata_store.write_dimension(dimension) def update_dimension(self, dimension_name, dimension): """Update dimension Parameters ---------- dimension_name : str dimension : ~smif.metadata.coords.Coords """ self.metadata_store.update_dimension(dimension_name, dimension) def delete_dimension(self, dimension_name): """Delete dimension Parameters ---------- dimension_name : str """ self.metadata_store.delete_dimension(dimension_name) def _add_coords(self, item, keys): """Add coordinates to spec definitions on an object """ item = deepcopy(item) for key in keys: spec_list = item[key] for spec in spec_list: if 'dims' in spec and spec['dims']: spec['coords'] = { dim: self.read_dimension(dim)['elements'] for dim in spec['dims'] } return item # endregion # # DATA # # region Scenario Variant Data def read_scenario_variant_data( self, scenario_name: str, variant_name: str, variable: str, timestep: Optional[int] = None, timesteps: Optional[List[int]] = None, assert_exists: bool = False) -> Union[DataArray, bool]: """Read scenario data file Parameters ---------- scenario_name : str variant_name : str variable : str timestep : int Returns ------- data : ~smif.data_layer.data_array.DataArray """ variant = self.read_scenario_variant(scenario_name, variant_name) key = self._key_from_data(variant['data'][variable], scenario_name, variant_name, variable) scenario = self.read_scenario(scenario_name) spec_dict = _pick_from_list(scenario['provides'], variable) spec = Spec.from_dict(spec_dict) if assert_exists: return self.data_store.scenario_variant_data_exists(key) else: return self.data_store.read_scenario_variant_data(key, spec, timestep, timesteps) def write_scenario_variant_data(self, scenario_name, variant_name, data): """Write scenario data file Parameters ---------- scenario_name : str variant_name : str data : ~smif.data_layer.data_array.DataArray """ variant = self.read_scenario_variant(scenario_name, variant_name) key = self._key_from_data(variant['data'][data.spec.name], scenario_name, variant_name, data.spec.name) self.data_store.write_scenario_variant_data(key, data) def convert_scenario_data(self, model_run_name, tgt_store, noclobber=False): model_run = self.read_model_run(model_run_name) # Convert scenario data for model run for scenario_name in model_run['scenarios']: for variant in self.read_scenario_variants(scenario_name): for variable in variant['data']: data_exists = tgt_store.read_scenario_variant_data( scenario_name, variant['name'], variable, assert_exists=True) if not(noclobber and data_exists): data_array = self.read_scenario_variant_data( scenario_name, variant['name'], variable) tgt_store.write_scenario_variant_data( scenario_name, variant['name'], data_array) # endregion # region Narrative Data def read_narrative_variant_data(self, sos_model_name, narrative_name, variant_name, parameter_name, timestep=None, assert_exists=False): """Read narrative data file Parameters ---------- sos_model_name : str narrative_name : str variant_name : str parameter_name : str timestep : int (optional) If None, read data for all timesteps Returns ------- ~smif.data_layer.data_array.DataArray """ sos_model = self.read_sos_model(sos_model_name) narrative = _pick_from_list(sos_model['narratives'], narrative_name) if narrative is None: msg = "Narrative name '{}' does not exist in sos_model '{}'" raise SmifDataNotFoundError(msg.format(narrative_name, sos_model_name)) variant = _pick_from_list(narrative['variants'], variant_name) if variant is None: msg = "Variant name '{}' does not exist in narrative '{}'" raise SmifDataNotFoundError(msg.format(variant_name, narrative_name)) key = self._key_from_data(variant['data'][parameter_name], narrative_name, variant_name, parameter_name) if assert_exists: return self.data_store.narrative_variant_data_exists(key) else: spec_dict = None # find sector model which needs this parameter, to get spec definition for model_name, params in narrative['provides'].items(): if parameter_name in params: sector_model = self.read_model(model_name) spec_dict = _pick_from_list(sector_model['parameters'], parameter_name) break # find spec if spec_dict is None: raise SmifDataNotFoundError("Parameter {} not found in any of {}".format( parameter_name, sos_model['sector_models'])) spec = Spec.from_dict(spec_dict) return self.data_store.read_narrative_variant_data(key, spec, timestep) def write_narrative_variant_data(self, sos_model_name, narrative_name, variant_name, data): """Read narrative data file Parameters ---------- sos_model_name : str narrative_name : str variant_name : str data : ~smif.data_layer.data_array.DataArray """ sos_model = self.read_sos_model(sos_model_name) narrative = _pick_from_list(sos_model['narratives'], narrative_name) variant = _pick_from_list(narrative['variants'], variant_name) key = self._key_from_data( variant['data'][data.spec.name], narrative_name, variant_name, data.spec.name) self.data_store.write_narrative_variant_data(key, data) def convert_narrative_data(self, sos_model_name, tgt_store, noclobber=False): sos_model = self.read_sos_model(sos_model_name) for narrative in sos_model['narratives']: for variant in narrative['variants']: for param in variant['data']: data_exists = tgt_store.read_narrative_variant_data(sos_model_name, narrative['name'], variant['name'], param, assert_exists=True) if not(noclobber and data_exists): data_array = self.read_narrative_variant_data(sos_model_name, narrative['name'], variant['name'], param) tgt_store.write_narrative_variant_data(sos_model_name, narrative['name'], variant['name'], data_array) def read_model_parameter_default(self, model_name, parameter_name, assert_exists=False): """Read default data for a sector model parameter Parameters ---------- model_name : str parameter_name : str Returns ------- ~smif.data_layer.data_array.DataArray """ model = self.read_model(model_name) param = _pick_from_list(model['parameters'], parameter_name) spec = Spec.from_dict(param) try: path = param['default'] except TypeError: raise SmifDataNotFoundError("Parameter {} not found in model {}".format( parameter_name, model_name)) except KeyError: path = 'default__{}__{}.csv'.format(model_name, parameter_name) key = self._key_from_data(path, model_name, parameter_name) if assert_exists: return self.data_store.model_parameter_default_data_exists(key) else: return self.data_store.read_model_parameter_default(key, spec) def write_model_parameter_default(self, model_name, parameter_name, data): """Write default data for a sector model parameter Parameters ---------- model_name : str parameter_name : str data : ~smif.data_layer.data_array.DataArray """ model = self.read_model(model_name, skip_coords=True) param = _pick_from_list(model['parameters'], parameter_name) try: path = param['default'] except TypeError: raise SmifDataNotFoundError("Parameter {} not found in model {}".format( parameter_name, model_name)) except KeyError: path = 'default__{}__{}.csv'.format(model_name, parameter_name) key = self._key_from_data(path, model_name, parameter_name) self.data_store.write_model_parameter_default(key, data) def convert_model_parameter_default_data(self, sector_model_name, tgt_store, noclobber=False): sector_model = self.read_model(sector_model_name) for parameter in sector_model['parameters']: data_exists = tgt_store.read_model_parameter_default(sector_model_name, parameter['name'], assert_exists=True) if not(noclobber and data_exists): data_array = self.read_model_parameter_default(sector_model_name, parameter['name']) tgt_store.write_model_parameter_default(sector_model_name, parameter['name'], data_array) # endregion # region Interventions def read_interventions(self, model_name): """Read interventions data for `model_name` Returns ------- dict[str, dict] A dict of intervention dictionaries containing intervention attributes keyed by intervention name """ model = self.read_model(model_name, skip_coords=True) if model['interventions'] != []: return self.data_store.read_interventions(model['interventions']) else: return {} def write_interventions(self, model_name, interventions): """Write interventions data for a model Parameters ---------- dict[str, dict] A dict of intervention dictionaries containing intervention attributes keyed by intervention name """ model = self.read_model(model_name) model['interventions'] = [model_name + '.csv'] self.update_model(model_name, model) self.data_store.write_interventions(model['interventions'][0], interventions) def write_interventions_file(self, model_name, string_id, interventions): model = self.read_model(model_name) if string_id in model['interventions']: self.data_store.write_interventions(string_id, interventions) else: raise SmifDataNotFoundError("Intervention {} not found for" " sector model {}.".format(string_id, model_name)) def read_interventions_file(self, model_name, string_id, assert_exists=False): model = self.read_model(model_name) if string_id in model['interventions']: if assert_exists: return self.data_store.interventions_data_exists(string_id) else: return self.data_store.read_interventions([string_id]) else: raise SmifDataNotFoundError("Intervention {} not found for" " sector model {}.".format(string_id, model_name)) def convert_interventions_data(self, sector_model_name, tgt_store, noclobber=False): sector_model = self.read_model(sector_model_name) for intervention in sector_model['interventions']: data_exists = tgt_store.read_interventions_file(sector_model_name, intervention, assert_exists=True) if not(noclobber and data_exists): interventions = self.read_interventions_file(sector_model_name, intervention) tgt_store.write_interventions_file( sector_model_name, intervention, interventions) def read_strategy_interventions(self, strategy, assert_exists=False): """Read interventions as defined in a model run strategy """ if assert_exists: return self.data_store.strategy_data_exists(strategy) else: return self.data_store.read_strategy_interventions(strategy) def write_strategy_interventions(self, strategy, data): """ Parameters ---------- list[dicts] """ self.data_store.write_strategy_interventions(strategy, data) def read_initial_conditions(self, model_name) -> List[Dict]: """Read historical interventions for `model_name` Returns ------- list[dict] A list of historical interventions, with keys 'name' and 'build_year' """ model = self.read_model(model_name) if model['initial_conditions'] != []: return self.data_store.read_initial_conditions(model['initial_conditions']) else: return [] def write_initial_conditions(self, model_name, initial_conditions): """Write historical interventions for a model Parameters ---------- list[dict] A list of historical interventions, with keys 'name' and 'build_year' """ model = self.read_model(model_name) model['initial_conditions'] = [model_name + '.csv'] self.update_model(model_name, model) self.data_store.write_initial_conditions(model['initial_conditions'][0], initial_conditions) def write_initial_conditions_file(self, model_name, string_id, initial_conditions): model = self.read_model(model_name) if string_id in model['initial_conditions']: self.data_store.write_initial_conditions(string_id, initial_conditions) else: raise SmifDataNotFoundError("Initial condition {} not found for" " sector model {}.".format(string_id, model_name)) def read_initial_conditions_file(self, model_name, string_id, assert_exists=False): model = self.read_model(model_name) if string_id in model['initial_conditions']: if assert_exists: return self.data_store.initial_conditions_data_exists(string_id) else: return self.data_store.read_initial_conditions([string_id]) else: raise SmifDataNotFoundError("Initial conditions {} not found for" " sector model {}.".format(string_id, model_name)) def convert_initial_conditions_data(self, sector_model_name, tgt_store, noclobber=False): sector_model = self.read_model(sector_model_name) for initial_condition in sector_model['initial_conditions']: data_exists = tgt_store.read_initial_conditions_file(sector_model_name, initial_condition, assert_exists=True) if not(noclobber and data_exists): initial_conditions = self.read_initial_conditions_file(sector_model_name, initial_condition) tgt_store.write_initial_conditions_file(sector_model_name, initial_condition, initial_conditions) def read_all_initial_conditions(self, model_run_name) -> List[Dict]: """A list of all historical interventions Returns ------- list[dict] """ historical_interventions = [] # type: List model_run = self.read_model_run(model_run_name) sos_model_name = model_run['sos_model'] sos_model = self.read_sos_model(sos_model_name) sector_model_names = sos_model['sector_models'] for sector_model_name in sector_model_names: historical_interventions.extend( self.read_initial_conditions(sector_model_name) ) return historical_interventions # endregion # region State def read_state(self, model_run_name, timestep, decision_iteration=None) -> List[Dict]: """Read list of (name, build_year) for a given model_run, timestep, decision Parameters ---------- model_run_name : str timestep : int decision_iteration : int, optional Returns ------- list[dict] """ return self.data_store.read_state(model_run_name, timestep, decision_iteration) def write_state(self, state, model_run_name, timestep, decision_iteration=None): """State is a list of decisions with name and build_year. State is output from the DecisionManager Parameters ---------- state : list[dict] model_run_name : str timestep : int decision_iteration : int, optional """ self.data_store.write_state(state, model_run_name, timestep, decision_iteration) # endregion # region Conversion coefficients def read_coefficients(self, source_dim: str, destination_dim: str) -> np.ndarray: """Reads coefficients from the store Coefficients are uniquely identified by their source/destination dimensions. This method and `write_coefficients` implement caching of conversion coefficients between dimensions. Parameters ---------- source_dim : str Dimension name destination_dim : str Dimension name Returns ------- numpy.ndarray Notes ----- To be called from :class:`~smif.convert.adaptor.Adaptor` implementations. """ return self.data_store.read_coefficients(source_dim, destination_dim) def write_coefficients(self, source_dim: str, destination_dim: str, data: np.ndarray): """Writes coefficients to the store Coefficients are uniquely identified by their source/destination dimensions. This method and `read_coefficients` implement caching of conversion coefficients between dimensions. Parameters ---------- source_dim : str Dimension name destination_dim : str Dimension name data : numpy.ndarray Notes ----- To be called from :class:`~smif.convert.adaptor.Adaptor` implementations. """ self.data_store.write_coefficients(source_dim, destination_dim, data) # endregion # region Results def read_results(self, model_run_name: str, model_name: str, output_spec: Spec, timestep: Optional[int] = None, decision_iteration: Optional[int] = None) -> DataArray: """Return results of a `model_name` in `model_run_name` for a given `output_name` Parameters ---------- model_run_name : str model_name : str output_spec : smif.metadata.Spec timestep : int, default=None decision_iteration : int, default=None Returns ------- ~smif.data_layer.data_array.DataArray """ return self.data_store.read_results( model_run_name, model_name, output_spec, timestep, decision_iteration) def write_results(self, data_array, model_run_name, model_name, timestep=None, decision_iteration=None): """Write results of a `model_name` in `model_run_name` for a given `output_name` Parameters ---------- data_array : ~smif.data_layer.data_array.DataArray model_run_id : str model_name : str timestep : int, optional decision_iteration : int, optional """ self.data_store.write_results( data_array, model_run_name, model_name, timestep, decision_iteration) def delete_results(self, model_run_name, model_name, output_name, timestep=None, decision_iteration=None): """Delete results for a single timestep/iteration of a model output in a model run Parameters ---------- model_run_name : str model_name : str output_name : str timestep : int, default=None decision_iteration : int, default=None """ self.data_store.delete_results( model_run_name, model_name, output_name, timestep, decision_iteration) def clear_results(self, model_run_name): """Clear all results from a single model run Parameters ---------- model_run_name : str """ available = self.available_results(model_run_name) for timestep, decision_iteration, model_name, output_name in available: self.data_store.delete_results( model_run_name, model_name, output_name, timestep, decision_iteration) def available_results(self, model_run_name): """List available results from a model run Parameters ---------- model_run_name : str Returns ------- list[tuple] Each tuple is (timestep, decision_iteration, model_name, output_name) """ return self.data_store.available_results(model_run_name) def completed_jobs(self, model_run_name): """List completed jobs from a model run Parameters ---------- model_run_name : str Returns ------- list[tuple] Each tuple is (timestep, decision_iteration, model_name) """ available_results = self.available_results(model_run_name) # {(t, d, model, output)} model_outputs = self.expected_model_outputs(model_run_name) # [(model, output)] completed_jobs = self.filter_complete_available_results( available_results, model_outputs) return completed_jobs @staticmethod def filter_complete_available_results(available_results, expected_model_outputs): """Filter available results from a model run to include only complete timestep/decision iteration combinations Parameters ---------- available_results: list[tuple] List of (timestep, decision_iteration, model_name, output_name) expected_model_outputs: list[tuple] List or set of (model_name, output_name) Returns ------- list[tuple] Each tuple is (timestep, decision_iteration, model_name) """ expected_model_outputs = set(expected_model_outputs) model_names = {model_name for model_name, _ in expected_model_outputs} model_outputs_by_td = defaultdict(set) for timestep, decision, model_name, output_name in available_results: model_outputs_by_td[(timestep, decision)].add((model_name, output_name)) completed_jobs = [] for (timestep, decision), td_model_outputs in model_outputs_by_td.items(): if td_model_outputs == expected_model_outputs: for model_name in model_names: completed_jobs.append((timestep, decision, model_name)) return completed_jobs def expected_model_outputs(self, model_run_name): """List expected model outputs from a model run Parameters ---------- model_run_name : str Returns ------- list[tuple] Each tuple is (model_name, output_name) """ model_run = self.read_model_run(model_run_name) sos_model_name = model_run['sos_model'] sos_config = self.read_sos_model(sos_model_name) # For each model, get the outputs and create (model_name, output_name) tuples expected_model_outputs = [] for model_name in sos_config['sector_models']: model_config = self.read_model(model_name) for output in model_config['outputs']: expected_model_outputs.append((model_name, output['name'])) return expected_model_outputs def prepare_warm_start(self, model_run_name): """Copy the results from the previous model_run if available The method allows a previous unsuccessful model_run to 'warm start' a new model run from a later timestep. Model results are recovered from the timestep that the previous model_run was run until, and the new model run runs from the returned timestep Parameters ---------- model_run_name : str Returns ------- int The timestep to which the data store was recovered Notes ----- Called from smif.controller.execute """ available_results = self.available_results(model_run_name) if available_results: max_timestep = max( timestep for timestep, decision_iteration, model_name, output_name in available_results ) # could explicitly clear results for max timestep else: max_timestep = None return max_timestep def canonical_available_results(self, model_run_name): """List the results that are available from a model run, collapsing all decision iterations. This is the unique items from calling `available_results`, with all decision iterations set to 0. This method is used to determine whether a model run is complete, given that it is impossible to know how many decision iterations to expect: we simply check that each expected timestep has been completed. Parameters ---------- model_run_name : str Returns ------- set Set of tuples representing available results """ available_results = self.available_results(model_run_name) canonical_list = [] for t, d, model_name, output_name in available_results: canonical_list.append((t, 0, model_name, output_name)) # Return as a set to remove duplicates return set(canonical_list) def canonical_expected_results(self, model_run_name): """List the results that are expected from a model run, collapsing all decision iterations. For a complete model run, this would coincide with the unique list returned from `available_results`, where all decision iterations are set to 0. This method is used to determine whether a model run is complete, given that it is impossible to know how many decision iterations to expect: we simply check that each expected timestep has been completed. Parameters ---------- model_run_name : str Returns ------- set Set of tuples representing expected results """ # Model results are returned as a tuple # (timestep, decision_it, model_name, output_name) # so we first build the full list of expected results tuples. expected_results = [] # Get the sos model name given the model run name, and the full list of timesteps model_run = self.read_model_run(model_run_name) timesteps = sorted(model_run['timesteps']) sos_model_name = model_run['sos_model'] # Get the list of sector models in the sos model sos_config = self.read_sos_model(sos_model_name) # For each sector model, get the outputs and create the tuples for model_name in sos_config['sector_models']: model_config = self.read_model(model_name) outputs = model_config['outputs'] for output, t in itertools.product(outputs, timesteps): expected_results.append((t, 0, model_name, output['name'])) # Return as a set to remove duplicates return set(expected_results) def canonical_missing_results(self, model_run_name): """List the results that are missing from a model run, collapsing all decision iterations. For a complete model run, this is what is left after removing canonical_available_results from canonical_expected_results. Parameters ---------- model_run_name : str Returns ------- set Set of tuples representing missing results """ return self.canonical_expected_results( model_run_name) - self.canonical_available_results(model_run_name) def _get_result_darray_internal(self, model_run_name, model_name, output_name, time_decision_tuples): """Internal implementation for `get_result_darray`, after the unique list of (timestep, decision) tuples has been generated and validated. This method gets the spec for the output defined by the model_run_name, model_name and output_name and expands the spec to include an additional dimension for the list of tuples. Then, for each tuple, the data array from the corresponding read_results call is stacked, and together with the new spec this information is returned as a new DataArray. Parameters ---------- model_run_name : str model_name : str output_name : str time_decision_tuples : list of unique (timestep, decision) tuples Returns ------- DataArray with expanded spec and data for each (timestep, decision) tuple """ # Get the output spec given the name of the sector model and output output_spec = None model = self.read_model(model_name) for output in model['outputs']: # Ignore if the output name doesn't match if output_name != output['name']: continue output_spec = Spec.from_dict(output) assert output_spec, "Output name was not found in model outputs" # Read the results for each (timestep, decision) tuple and stack them list_of_numpy_arrays = [] for t, d in time_decision_tuples: d_array = self.read_results(model_run_name, model_name, output_spec, t, d) list_of_numpy_arrays.append(d_array.data) stacked_data = np.vstack(list_of_numpy_arrays) # Add new dimensions to the data spec output_dict = output_spec.as_dict() output_dict['dims'] = ['timestep_decision'] + output_dict['dims'] output_dict['coords']['timestep_decision'] = time_decision_tuples output_spec = Spec.from_dict(output_dict) # Create a new DataArray from the modified spec and stacked data return DataArray(output_spec, np.reshape(stacked_data, output_spec.shape)) def get_result_darray(self, model_run_name, model_name, output_name, timesteps=None, decision_iterations=None, time_decision_tuples=None): """Return data for multiple timesteps and decision iterations for a given output from a given sector model in a specific model run. You can specify either: a list of (timestep, decision) tuples in which case data for all of those tuples matching the available results will be returned or: a list of timesteps in which case data for all of those timesteps (and any decision iterations) matching the available results will be returned or: a list of decision iterations in which case data for all of those decision iterations (and any timesteps) matching the available results will be returned or: a list of timesteps and a list of decision iterations in which case data for the Cartesian product of those timesteps and those decision iterations matching the available results will be returned or: nothing in which case all available results will be returned Then, for each tuple, the data array from the corresponding read_results call is stacked, and together with the new spec this information is returned as a new DataArray. Parameters ---------- model_run_name : str model_name : str output_name : str timesteps : optional list of timesteps decision_iterations : optional list of decision iterations time_decision_tuples : optional list of unique (timestep, decision) tuples Returns ------- DataArray with expanded spec and the data requested """ available = self.available_results(model_run_name) # Build up the necessary list of tuples if not timesteps and not decision_iterations and not time_decision_tuples: list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name ] elif timesteps and not decision_iterations and not time_decision_tuples: list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name and t in timesteps ] elif decision_iterations and not timesteps and not time_decision_tuples: list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name and d in decision_iterations ] elif time_decision_tuples and not timesteps and not decision_iterations: list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name and (t, d) in time_decision_tuples ] elif timesteps and decision_iterations and not time_decision_tuples: t_d = list(itertools.product(timesteps, decision_iterations)) list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name and (t, d) in t_d ] else: msg = "Expected either timesteps, or decisions, or (timestep, decision) " + \ "tuples, or timesteps and decisions, or none of the above." raise ValueError(msg) if not list_of_tuples: raise SmifDataNotFoundError("None of the requested data is available.") return self._get_result_darray_internal( model_run_name, model_name, output_name, sorted(list_of_tuples) ) def get_results(self, model_run_names: list, model_name: str, output_names: list, timesteps: list = None, decisions: list = None, time_decision_tuples: list = None, ): """Return data for multiple timesteps and decision iterations for a given output from a given sector model for multiple model runs. Parameters ---------- model_run_names: list[str] the requested model run names model_name: str the requested sector model name output_names: list[str] the requested output names (output specs must all match) timesteps: list[int] the requested timesteps decisions: list[int] the requested decision iterations time_decision_tuples: list[tuple] a list of requested (timestep, decision) tuples Returns ------- dict Nested dictionary of DataArray objects, keyed on model run name and output name. Returned DataArrays include one extra (timestep, decision_iteration) dimension. """ # List the available output names and verify requested outputs match outputs = self.read_model(model_name)['outputs'] available_outputs = [output['name'] for output in outputs] for output_name in output_names: assert output_name in available_outputs, \ '{} is not an output of sector model {}.'.format(output_name, model_name) # The spec for each requested output must be the same. We check they have the same # coordinates coords = [Spec.from_dict(output).coords for output in outputs if output['name'] in output_names] for coord in coords: if coord != coords[0]: raise ValueError('Different outputs must have the same coordinates') # Now actually obtain the requested results results_dict = OrderedDict() # type: OrderedDict for model_run_name in model_run_names: results_dict[model_run_name] = OrderedDict() for output_name in output_names: results_dict[model_run_name][output_name] = self.get_result_darray( model_run_name, model_name, output_name, timesteps, decisions, time_decision_tuples ) return results_dict # endregion # region data store utilities def _key_from_data(self, path, *args): """Return path or generate a unique key for a given set of args """ if isinstance(self.data_store, (CSVDataStore, ParquetDataStore)): return path else: return tuple(args) # endregion def _pick_from_list(list_of_dicts, name): for item in list_of_dicts: if 'name' in item and item['name'] == name: return item return None
"""The store provides a common data interface to smif configuration, data and metadata. Raises ------ SmifDataNotFoundError If data cannot be found in the store when try to read from the store SmifDataExistsError If data already exists in the store when trying to write to the store (use an update method instead) SmifDataMismatchError Data presented to read, write and update methods is in the incorrect format or of wrong dimensions to that expected SmifDataReadError When unable to read data e.g. unable to handle file type or connect to database """ import itertools import logging import os from collections import OrderedDict, defaultdict from copy import deepcopy from operator import itemgetter from os.path import splitext from typing import Dict, List, Optional, Union import numpy as np # type: ignore from smif.data_layer import DataArray from smif.data_layer.abstract_data_store import DataStore from smif.data_layer.abstract_metadata_store import MetadataStore from smif.data_layer.file import (CSVDataStore, FileMetadataStore, ParquetDataStore, YamlConfigStore) from smif.data_layer.validate import (validate_sos_model_config, validate_sos_model_format) from smif.exception import SmifDataError, SmifDataNotFoundError from smif.metadata.spec import Spec class Store(): """Common interface to data store, composed of config, metadata and data store implementations. Parameters ---------- config_store: ~smif.data_layer.abstract_config_store.ConfigStore metadata_store: ~smif.data_layer.abstract_metadata_store.MetadataStore data_store: ~smif.data_layer.abstract_data_store.DataStore """ def __init__(self, config_store, metadata_store: MetadataStore, data_store: DataStore, model_base_folder="."): self.logger = logging.getLogger(__name__) self.config_store = config_store self.metadata_store = metadata_store self.data_store = data_store # base folder for any relative paths to models self.model_base_folder = str(model_base_folder) @classmethod def from_dict(cls, config): """Create Store from configuration dict """ try: interface = config['interface'] except KeyError: logging.warning('No interface provided for Results(). Assuming local_csv') interface = 'local_csv' try: directory = config['dir'] except KeyError: logging.warning("No directory provided for Results(). Assuming '.'") directory = '.' # Check that the directory is valid if not os.path.isdir(directory): raise ValueError('Expected {} to be a valid directory'.format(directory)) if interface == 'local_csv': data_store = CSVDataStore(directory) elif interface == 'local_parquet': data_store = ParquetDataStore(directory) else: raise ValueError( 'Unsupported interface "{}". Supply local_csv or local_parquet'.format( interface)) return cls( config_store=YamlConfigStore(directory), metadata_store=FileMetadataStore(directory), data_store=data_store, model_base_folder=directory ) # # CONFIG # # region Model runs def read_model_runs(self): """Read all system-of-system model runs Returns ------- list[~smif.controller.modelrun.ModelRun] """ return sorted(self.config_store.read_model_runs(), key=itemgetter('name')) def read_model_run(self, model_run_name): """Read a system-of-system model run Parameters ---------- model_run_name : str Returns ------- ~smif.controller.modelrun.ModelRun """ return self.config_store.read_model_run(model_run_name) def write_model_run(self, model_run): """Write system-of-system model run Parameters ---------- model_run : ~smif.controller.modelrun.ModelRun """ self.config_store.write_model_run(model_run) def update_model_run(self, model_run_name, model_run): """Update system-of-system model run Parameters ---------- model_run_name : str model_run : ~smif.controller.modelrun.ModelRun """ self.config_store.update_model_run(model_run_name, model_run) def delete_model_run(self, model_run_name): """Delete a system-of-system model run Parameters ---------- model_run_name : str """ self.config_store.delete_model_run(model_run_name) # endregion # region System-of-systems models def read_sos_models(self): """Read all system-of-system models Returns ------- list[~smif.model.sos_model.SosModel] """ return sorted(self.config_store.read_sos_models(), key=itemgetter('name')) def read_sos_model(self, sos_model_name): """Read a specific system-of-system model Parameters ---------- sos_model_name : str Returns ------- ~smif.model.sos_model.SosModel """ return self.config_store.read_sos_model(sos_model_name) def write_sos_model(self, sos_model): """Write system-of-system model Parameters ---------- sos_model : ~smif.model.sos_model.SosModel """ validate_sos_model_format(sos_model) self.config_store.write_sos_model(sos_model) def update_sos_model(self, sos_model_name, sos_model): """Update system-of-system model Parameters ---------- sos_model_name : str sos_model : ~smif.model.sos_model.SosModel """ models = self.config_store.read_models() scenarios = self.config_store.read_scenarios() validate_sos_model_config(sos_model, models, scenarios) self.config_store.update_sos_model(sos_model_name, sos_model) def delete_sos_model(self, sos_model_name): """Delete a system-of-system model Parameters ---------- sos_model_name : str """ self.config_store.delete_sos_model(sos_model_name) # endregion # region Models def read_models(self, skip_coords=False): """Read all models Returns ------- list[~smif.model.model.Model] """ models = sorted(self.config_store.read_models(), key=itemgetter('name')) if not skip_coords: models = [ self._add_coords(model, ('inputs', 'outputs', 'parameters')) for model in models ] return models def read_model(self, model_name, skip_coords=False): """Read a model Parameters ---------- model_name : str Returns ------- ~smif.model.model.Model """ model = self.config_store.read_model(model_name) if not skip_coords: model = self._add_coords(model, ('inputs', 'outputs', 'parameters')) return model def write_model(self, model): """Write a model Parameters ---------- model : ~smif.model.model.Model """ self.config_store.write_model(model) def update_model(self, model_name, model): """Update a model Parameters ---------- model_name : str model : ~smif.model.model.Model """ self.config_store.update_model(model_name, model) def delete_model(self, model_name): """Delete a model Parameters ---------- model_name : str """ self.config_store.delete_model(model_name) # endregion # region Scenarios def read_scenarios(self, skip_coords=False): """Read scenarios Returns ------- list[~smif.model.ScenarioModel] """ scenarios = sorted(self.config_store.read_scenarios(), key=itemgetter('name')) if not skip_coords: scenarios = [ self._add_coords(scenario, ['provides']) for scenario in scenarios ] return scenarios def read_scenario(self, scenario_name, skip_coords=False): """Read a scenario Parameters ---------- scenario_name : str Returns ------- ~smif.model.ScenarioModel """ scenario = self.config_store.read_scenario(scenario_name) if not skip_coords: scenario = self._add_coords(scenario, ['provides']) return scenario def write_scenario(self, scenario): """Write scenario Parameters ---------- scenario : ~smif.model.ScenarioModel """ self.config_store.write_scenario(scenario) def update_scenario(self, scenario_name, scenario): """Update scenario Parameters ---------- scenario_name : str scenario : ~smif.model.ScenarioModel """ self.config_store.update_scenario(scenario_name, scenario) def delete_scenario(self, scenario_name): """Delete scenario from project configuration Parameters ---------- scenario_name : str """ self.config_store.delete_scenario(scenario_name) def prepare_scenario(self, scenario_name, list_of_variants): """ Modify {scenario_name} config file to include multiple scenario variants. Parameters ---------- scenario_name: str list_of_variants: list[int] - indices of scenario variants """ scenario = self.read_scenario(scenario_name) # Check that template scenario file does not define more than one variant if not scenario['variants'] or len(scenario['variants']) > 1: raise SmifDataError("Template scenario file must define one" " unique template variant.") # Read variant defined in template scenario file variant_template_name = scenario['variants'][0]['name'] base_variant = self.read_scenario_variant(scenario_name, variant_template_name) self.delete_scenario_variant(scenario_name, variant_template_name) # Read template names of scenario variant data files output_filenames = {} # output_name => (base, ext) # root is a dict. keyed on scenario outputs. # Entries contain the root of the variants filenames for output in scenario['provides']: output_name = output['name'] base, ext = splitext(base_variant['data'][output_name]) output_filenames[output_name] = base, ext # Now modify scenario file for variant_number in list_of_variants: # Copying the variant dict is required when underlying config_store # is an instance of MemoryConfigStore, which attribute _scenarios holds # a reference to the variant object passed to update or # write_scenario_variant variant = deepcopy(base_variant) variant['name'] = '{}_{:03d}'.format(scenario_name, variant_number) variant['description'] = '{} variant number {:03d}'.format( scenario_name, variant_number) for output_name, (base, ext) in output_filenames.items(): variant['data'][output_name] = '{}{:03d}{}'.format(base, variant_number, ext) self.write_scenario_variant(scenario_name, variant) def prepare_model_runs(self, model_run_name, scenario_name, first_var, last_var): """Write multiple model run config files corresponding to multiple scenario variants of {scenario_name}, based on template {model_run_name} Write batchfile containing each of the generated model runs Parameters ---------- model_run_name: str scenario_name: str first_var: int - between 0 and number of variants-1 last_var: int - between first_var and number of variants-1 """ model_run = self.read_model_run(model_run_name) scenario = self.read_scenario(scenario_name) # read strategies from config store (Store.read_strategies pulls together data on # interventions as well, which we don't need here) config_strategies = self.config_store.read_strategies(model_run_name) # Open batchfile f_handle = open(model_run_name + '.batch', 'w') # For each variant model_run, write a new model run file with corresponding # scenario variant and update batchfile for variant in scenario['variants'][first_var:last_var + 1]: variant_model_run_name = model_run_name + '_' + variant['name'] model_run_copy = deepcopy(model_run) model_run_copy['name'] = variant_model_run_name model_run_copy['scenarios'][scenario_name] = variant['name'] self.write_model_run(model_run_copy) self.config_store.write_strategies(variant_model_run_name, config_strategies) f_handle.write(model_run_name + '_' + variant['name'] + '\n') # Close batchfile f_handle.close() # endregion # region Scenario Variants def read_scenario_variants(self, scenario_name): """Read variants of a given scenario Parameters ---------- scenario_name : str Returns ------- list[dict] """ scenario_variants = self.config_store.read_scenario_variants(scenario_name) return sorted(scenario_variants, key=itemgetter('name')) def read_scenario_variant(self, scenario_name, variant_name): """Read a scenario variant Parameters ---------- scenario_name : str variant_name : str Returns ------- dict """ return self.config_store.read_scenario_variant(scenario_name, variant_name) def write_scenario_variant(self, scenario_name, variant): """Write scenario to project configuration Parameters ---------- scenario_name : str variant : dict """ self.config_store.write_scenario_variant(scenario_name, variant) def update_scenario_variant(self, scenario_name, variant_name, variant): """Update scenario to project configuration Parameters ---------- scenario_name : str variant_name : str variant : dict """ self.config_store.update_scenario_variant(scenario_name, variant_name, variant) def delete_scenario_variant(self, scenario_name, variant_name): """Delete scenario from project configuration Parameters ---------- scenario_name : str variant_name : str """ self.config_store.delete_scenario_variant(scenario_name, variant_name) # endregion # region Narratives def read_narrative(self, sos_model_name, narrative_name): """Read narrative from sos_model Parameters ---------- sos_model_name : str narrative_name : str """ return self.config_store.read_narrative(sos_model_name, narrative_name) # endregion # region Strategies def read_strategies(self, model_run_name): """Read strategies for a given model run Parameters ---------- model_run_name : str Returns ------- list[dict] """ strategies = deepcopy(self.config_store.read_strategies(model_run_name)) for strategy in strategies: if strategy['type'] == 'pre-specified-planning': strategy['interventions'] = self.data_store.read_strategy_interventions( strategy) return strategies def write_strategies(self, model_run_name, strategies): """Write strategies for a given model_run Parameters ---------- model_run_name : str strategies : list[dict] """ self.config_store.write_strategies(model_run_name, strategies) def convert_strategies_data(self, model_run_name, tgt_store, noclobber=False): strategies = self.read_strategies(model_run_name) for strategy in strategies: if strategy['type'] == 'pre-specified-planning': data_exists = tgt_store.read_strategy_interventions( strategy, assert_exists=True) if not(noclobber and data_exists): data = self.read_strategy_interventions(strategy) tgt_store.write_strategy_interventions(strategy, data) # endregion # # METADATA # # region Units def read_unit_definitions(self) -> List[str]: """Reads custom unit definitions Returns ------- list[str] Pint-compatible unit definitions """ return self.metadata_store.read_unit_definitions() def write_unit_definitions(self, definitions): """Reads custom unit definitions Parameters ---------- list[str] Pint-compatible unit definitions """ self.metadata_store.write_unit_definitions(definitions) # endregion # region Dimensions def read_dimensions(self, skip_coords=False): """Read dimensions Returns ------- list[~smif.metadata.coords.Coords] """ return self.metadata_store.read_dimensions(skip_coords) def read_dimension(self, dimension_name, skip_coords=False): """Return dimension Parameters ---------- dimension_name : str Returns ------- ~smif.metadata.coords.Coords A dimension definition (including elements) """ return self.metadata_store.read_dimension(dimension_name, skip_coords) def write_dimension(self, dimension): """Write dimension to project configuration Parameters ---------- dimension : ~smif.metadata.coords.Coords """ self.metadata_store.write_dimension(dimension) def update_dimension(self, dimension_name, dimension): """Update dimension Parameters ---------- dimension_name : str dimension : ~smif.metadata.coords.Coords """ self.metadata_store.update_dimension(dimension_name, dimension) def delete_dimension(self, dimension_name): """Delete dimension Parameters ---------- dimension_name : str """ self.metadata_store.delete_dimension(dimension_name) def _add_coords(self, item, keys): """Add coordinates to spec definitions on an object """ item = deepcopy(item) for key in keys: spec_list = item[key] for spec in spec_list: if 'dims' in spec and spec['dims']: spec['coords'] = { dim: self.read_dimension(dim)['elements'] for dim in spec['dims'] } return item # endregion # # DATA # # region Scenario Variant Data def read_scenario_variant_data( self, scenario_name: str, variant_name: str, variable: str, timestep: Optional[int] = None, timesteps: Optional[List[int]] = None, assert_exists: bool = False) -> Union[DataArray, bool]: """Read scenario data file Parameters ---------- scenario_name : str variant_name : str variable : str timestep : int Returns ------- data : ~smif.data_layer.data_array.DataArray """ variant = self.read_scenario_variant(scenario_name, variant_name) key = self._key_from_data(variant['data'][variable], scenario_name, variant_name, variable) scenario = self.read_scenario(scenario_name) spec_dict = _pick_from_list(scenario['provides'], variable) spec = Spec.from_dict(spec_dict) if assert_exists: return self.data_store.scenario_variant_data_exists(key) else: return self.data_store.read_scenario_variant_data(key, spec, timestep, timesteps) def write_scenario_variant_data(self, scenario_name, variant_name, data): """Write scenario data file Parameters ---------- scenario_name : str variant_name : str data : ~smif.data_layer.data_array.DataArray """ variant = self.read_scenario_variant(scenario_name, variant_name) key = self._key_from_data(variant['data'][data.spec.name], scenario_name, variant_name, data.spec.name) self.data_store.write_scenario_variant_data(key, data) def convert_scenario_data(self, model_run_name, tgt_store, noclobber=False): model_run = self.read_model_run(model_run_name) # Convert scenario data for model run for scenario_name in model_run['scenarios']: for variant in self.read_scenario_variants(scenario_name): for variable in variant['data']: data_exists = tgt_store.read_scenario_variant_data( scenario_name, variant['name'], variable, assert_exists=True) if not(noclobber and data_exists): data_array = self.read_scenario_variant_data( scenario_name, variant['name'], variable) tgt_store.write_scenario_variant_data( scenario_name, variant['name'], data_array) # endregion # region Narrative Data def read_narrative_variant_data(self, sos_model_name, narrative_name, variant_name, parameter_name, timestep=None, assert_exists=False): """Read narrative data file Parameters ---------- sos_model_name : str narrative_name : str variant_name : str parameter_name : str timestep : int (optional) If None, read data for all timesteps Returns ------- ~smif.data_layer.data_array.DataArray """ sos_model = self.read_sos_model(sos_model_name) narrative = _pick_from_list(sos_model['narratives'], narrative_name) if narrative is None: msg = "Narrative name '{}' does not exist in sos_model '{}'" raise SmifDataNotFoundError(msg.format(narrative_name, sos_model_name)) variant = _pick_from_list(narrative['variants'], variant_name) if variant is None: msg = "Variant name '{}' does not exist in narrative '{}'" raise SmifDataNotFoundError(msg.format(variant_name, narrative_name)) key = self._key_from_data(variant['data'][parameter_name], narrative_name, variant_name, parameter_name) if assert_exists: return self.data_store.narrative_variant_data_exists(key) else: spec_dict = None # find sector model which needs this parameter, to get spec definition for model_name, params in narrative['provides'].items(): if parameter_name in params: sector_model = self.read_model(model_name) spec_dict = _pick_from_list(sector_model['parameters'], parameter_name) break # find spec if spec_dict is None: raise SmifDataNotFoundError("Parameter {} not found in any of {}".format( parameter_name, sos_model['sector_models'])) spec = Spec.from_dict(spec_dict) return self.data_store.read_narrative_variant_data(key, spec, timestep) def write_narrative_variant_data(self, sos_model_name, narrative_name, variant_name, data): """Read narrative data file Parameters ---------- sos_model_name : str narrative_name : str variant_name : str data : ~smif.data_layer.data_array.DataArray """ sos_model = self.read_sos_model(sos_model_name) narrative = _pick_from_list(sos_model['narratives'], narrative_name) variant = _pick_from_list(narrative['variants'], variant_name) key = self._key_from_data( variant['data'][data.spec.name], narrative_name, variant_name, data.spec.name) self.data_store.write_narrative_variant_data(key, data) def convert_narrative_data(self, sos_model_name, tgt_store, noclobber=False): sos_model = self.read_sos_model(sos_model_name) for narrative in sos_model['narratives']: for variant in narrative['variants']: for param in variant['data']: data_exists = tgt_store.read_narrative_variant_data(sos_model_name, narrative['name'], variant['name'], param, assert_exists=True) if not(noclobber and data_exists): data_array = self.read_narrative_variant_data(sos_model_name, narrative['name'], variant['name'], param) tgt_store.write_narrative_variant_data(sos_model_name, narrative['name'], variant['name'], data_array) def read_model_parameter_default(self, model_name, parameter_name, assert_exists=False): """Read default data for a sector model parameter Parameters ---------- model_name : str parameter_name : str Returns ------- ~smif.data_layer.data_array.DataArray """ model = self.read_model(model_name) param = _pick_from_list(model['parameters'], parameter_name) spec = Spec.from_dict(param) try: path = param['default'] except TypeError: raise SmifDataNotFoundError("Parameter {} not found in model {}".format( parameter_name, model_name)) except KeyError: path = 'default__{}__{}.csv'.format(model_name, parameter_name) key = self._key_from_data(path, model_name, parameter_name) if assert_exists: return self.data_store.model_parameter_default_data_exists(key) else: return self.data_store.read_model_parameter_default(key, spec) def write_model_parameter_default(self, model_name, parameter_name, data): """Write default data for a sector model parameter Parameters ---------- model_name : str parameter_name : str data : ~smif.data_layer.data_array.DataArray """ model = self.read_model(model_name, skip_coords=True) param = _pick_from_list(model['parameters'], parameter_name) try: path = param['default'] except TypeError: raise SmifDataNotFoundError("Parameter {} not found in model {}".format( parameter_name, model_name)) except KeyError: path = 'default__{}__{}.csv'.format(model_name, parameter_name) key = self._key_from_data(path, model_name, parameter_name) self.data_store.write_model_parameter_default(key, data) def convert_model_parameter_default_data(self, sector_model_name, tgt_store, noclobber=False): sector_model = self.read_model(sector_model_name) for parameter in sector_model['parameters']: data_exists = tgt_store.read_model_parameter_default(sector_model_name, parameter['name'], assert_exists=True) if not(noclobber and data_exists): data_array = self.read_model_parameter_default(sector_model_name, parameter['name']) tgt_store.write_model_parameter_default(sector_model_name, parameter['name'], data_array) # endregion # region Interventions def read_interventions(self, model_name): """Read interventions data for `model_name` Returns ------- dict[str, dict] A dict of intervention dictionaries containing intervention attributes keyed by intervention name """ model = self.read_model(model_name, skip_coords=True) if model['interventions'] != []: return self.data_store.read_interventions(model['interventions']) else: return {} def write_interventions(self, model_name, interventions): """Write interventions data for a model Parameters ---------- dict[str, dict] A dict of intervention dictionaries containing intervention attributes keyed by intervention name """ model = self.read_model(model_name) model['interventions'] = [model_name + '.csv'] self.update_model(model_name, model) self.data_store.write_interventions(model['interventions'][0], interventions) def write_interventions_file(self, model_name, string_id, interventions): model = self.read_model(model_name) if string_id in model['interventions']: self.data_store.write_interventions(string_id, interventions) else: raise SmifDataNotFoundError("Intervention {} not found for" " sector model {}.".format(string_id, model_name)) def read_interventions_file(self, model_name, string_id, assert_exists=False): model = self.read_model(model_name) if string_id in model['interventions']: if assert_exists: return self.data_store.interventions_data_exists(string_id) else: return self.data_store.read_interventions([string_id]) else: raise SmifDataNotFoundError("Intervention {} not found for" " sector model {}.".format(string_id, model_name)) def convert_interventions_data(self, sector_model_name, tgt_store, noclobber=False): sector_model = self.read_model(sector_model_name) for intervention in sector_model['interventions']: data_exists = tgt_store.read_interventions_file(sector_model_name, intervention, assert_exists=True) if not(noclobber and data_exists): interventions = self.read_interventions_file(sector_model_name, intervention) tgt_store.write_interventions_file( sector_model_name, intervention, interventions) def read_strategy_interventions(self, strategy, assert_exists=False): """Read interventions as defined in a model run strategy """ if assert_exists: return self.data_store.strategy_data_exists(strategy) else: return self.data_store.read_strategy_interventions(strategy) def write_strategy_interventions(self, strategy, data): """ Parameters ---------- list[dicts] """ self.data_store.write_strategy_interventions(strategy, data) def read_initial_conditions(self, model_name) -> List[Dict]: """Read historical interventions for `model_name` Returns ------- list[dict] A list of historical interventions, with keys 'name' and 'build_year' """ model = self.read_model(model_name) if model['initial_conditions'] != []: return self.data_store.read_initial_conditions(model['initial_conditions']) else: return [] def write_initial_conditions(self, model_name, initial_conditions): """Write historical interventions for a model Parameters ---------- list[dict] A list of historical interventions, with keys 'name' and 'build_year' """ model = self.read_model(model_name) model['initial_conditions'] = [model_name + '.csv'] self.update_model(model_name, model) self.data_store.write_initial_conditions(model['initial_conditions'][0], initial_conditions) def write_initial_conditions_file(self, model_name, string_id, initial_conditions): model = self.read_model(model_name) if string_id in model['initial_conditions']: self.data_store.write_initial_conditions(string_id, initial_conditions) else: raise SmifDataNotFoundError("Initial condition {} not found for" " sector model {}.".format(string_id, model_name)) def read_initial_conditions_file(self, model_name, string_id, assert_exists=False): model = self.read_model(model_name) if string_id in model['initial_conditions']: if assert_exists: return self.data_store.initial_conditions_data_exists(string_id) else: return self.data_store.read_initial_conditions([string_id]) else: raise SmifDataNotFoundError("Initial conditions {} not found for" " sector model {}.".format(string_id, model_name)) def convert_initial_conditions_data(self, sector_model_name, tgt_store, noclobber=False): sector_model = self.read_model(sector_model_name) for initial_condition in sector_model['initial_conditions']: data_exists = tgt_store.read_initial_conditions_file(sector_model_name, initial_condition, assert_exists=True) if not(noclobber and data_exists): initial_conditions = self.read_initial_conditions_file(sector_model_name, initial_condition) tgt_store.write_initial_conditions_file(sector_model_name, initial_condition, initial_conditions) def read_all_initial_conditions(self, model_run_name) -> List[Dict]: """A list of all historical interventions Returns ------- list[dict] """ historical_interventions = [] # type: List model_run = self.read_model_run(model_run_name) sos_model_name = model_run['sos_model'] sos_model = self.read_sos_model(sos_model_name) sector_model_names = sos_model['sector_models'] for sector_model_name in sector_model_names: historical_interventions.extend( self.read_initial_conditions(sector_model_name) ) return historical_interventions # endregion # region State def read_state(self, model_run_name, timestep, decision_iteration=None) -> List[Dict]: """Read list of (name, build_year) for a given model_run, timestep, decision Parameters ---------- model_run_name : str timestep : int decision_iteration : int, optional Returns ------- list[dict] """ return self.data_store.read_state(model_run_name, timestep, decision_iteration) def write_state(self, state, model_run_name, timestep, decision_iteration=None): """State is a list of decisions with name and build_year. State is output from the DecisionManager Parameters ---------- state : list[dict] model_run_name : str timestep : int decision_iteration : int, optional """ self.data_store.write_state(state, model_run_name, timestep, decision_iteration) # endregion # region Conversion coefficients def read_coefficients(self, source_dim: str, destination_dim: str) -> np.ndarray: """Reads coefficients from the store Coefficients are uniquely identified by their source/destination dimensions. This method and `write_coefficients` implement caching of conversion coefficients between dimensions. Parameters ---------- source_dim : str Dimension name destination_dim : str Dimension name Returns ------- numpy.ndarray Notes ----- To be called from :class:`~smif.convert.adaptor.Adaptor` implementations. """ return self.data_store.read_coefficients(source_dim, destination_dim) def write_coefficients(self, source_dim: str, destination_dim: str, data: np.ndarray): """Writes coefficients to the store Coefficients are uniquely identified by their source/destination dimensions. This method and `read_coefficients` implement caching of conversion coefficients between dimensions. Parameters ---------- source_dim : str Dimension name destination_dim : str Dimension name data : numpy.ndarray Notes ----- To be called from :class:`~smif.convert.adaptor.Adaptor` implementations. """ self.data_store.write_coefficients(source_dim, destination_dim, data) # endregion # region Results def read_results(self, model_run_name: str, model_name: str, output_spec: Spec, timestep: Optional[int] = None, decision_iteration: Optional[int] = None) -> DataArray: """Return results of a `model_name` in `model_run_name` for a given `output_name` Parameters ---------- model_run_name : str model_name : str output_spec : smif.metadata.Spec timestep : int, default=None decision_iteration : int, default=None Returns ------- ~smif.data_layer.data_array.DataArray """ return self.data_store.read_results( model_run_name, model_name, output_spec, timestep, decision_iteration) def write_results(self, data_array, model_run_name, model_name, timestep=None, decision_iteration=None): """Write results of a `model_name` in `model_run_name` for a given `output_name` Parameters ---------- data_array : ~smif.data_layer.data_array.DataArray model_run_id : str model_name : str timestep : int, optional decision_iteration : int, optional """ self.data_store.write_results( data_array, model_run_name, model_name, timestep, decision_iteration) def delete_results(self, model_run_name, model_name, output_name, timestep=None, decision_iteration=None): """Delete results for a single timestep/iteration of a model output in a model run Parameters ---------- model_run_name : str model_name : str output_name : str timestep : int, default=None decision_iteration : int, default=None """ self.data_store.delete_results( model_run_name, model_name, output_name, timestep, decision_iteration) def clear_results(self, model_run_name): """Clear all results from a single model run Parameters ---------- model_run_name : str """ available = self.available_results(model_run_name) for timestep, decision_iteration, model_name, output_name in available: self.data_store.delete_results( model_run_name, model_name, output_name, timestep, decision_iteration) def available_results(self, model_run_name): """List available results from a model run Parameters ---------- model_run_name : str Returns ------- list[tuple] Each tuple is (timestep, decision_iteration, model_name, output_name) """ return self.data_store.available_results(model_run_name) def completed_jobs(self, model_run_name): """List completed jobs from a model run Parameters ---------- model_run_name : str Returns ------- list[tuple] Each tuple is (timestep, decision_iteration, model_name) """ available_results = self.available_results(model_run_name) # {(t, d, model, output)} model_outputs = self.expected_model_outputs(model_run_name) # [(model, output)] completed_jobs = self.filter_complete_available_results( available_results, model_outputs) return completed_jobs @staticmethod def filter_complete_available_results(available_results, expected_model_outputs): """Filter available results from a model run to include only complete timestep/decision iteration combinations Parameters ---------- available_results: list[tuple] List of (timestep, decision_iteration, model_name, output_name) expected_model_outputs: list[tuple] List or set of (model_name, output_name) Returns ------- list[tuple] Each tuple is (timestep, decision_iteration, model_name) """ expected_model_outputs = set(expected_model_outputs) model_names = {model_name for model_name, _ in expected_model_outputs} model_outputs_by_td = defaultdict(set) for timestep, decision, model_name, output_name in available_results: model_outputs_by_td[(timestep, decision)].add((model_name, output_name)) completed_jobs = [] for (timestep, decision), td_model_outputs in model_outputs_by_td.items(): if td_model_outputs == expected_model_outputs: for model_name in model_names: completed_jobs.append((timestep, decision, model_name)) return completed_jobs def expected_model_outputs(self, model_run_name): """List expected model outputs from a model run Parameters ---------- model_run_name : str Returns ------- list[tuple] Each tuple is (model_name, output_name) """ model_run = self.read_model_run(model_run_name) sos_model_name = model_run['sos_model'] sos_config = self.read_sos_model(sos_model_name) # For each model, get the outputs and create (model_name, output_name) tuples expected_model_outputs = [] for model_name in sos_config['sector_models']: model_config = self.read_model(model_name) for output in model_config['outputs']: expected_model_outputs.append((model_name, output['name'])) return expected_model_outputs def prepare_warm_start(self, model_run_name): """Copy the results from the previous model_run if available The method allows a previous unsuccessful model_run to 'warm start' a new model run from a later timestep. Model results are recovered from the timestep that the previous model_run was run until, and the new model run runs from the returned timestep Parameters ---------- model_run_name : str Returns ------- int The timestep to which the data store was recovered Notes ----- Called from smif.controller.execute """ available_results = self.available_results(model_run_name) if available_results: max_timestep = max( timestep for timestep, decision_iteration, model_name, output_name in available_results ) # could explicitly clear results for max timestep else: max_timestep = None return max_timestep def canonical_available_results(self, model_run_name): """List the results that are available from a model run, collapsing all decision iterations. This is the unique items from calling `available_results`, with all decision iterations set to 0. This method is used to determine whether a model run is complete, given that it is impossible to know how many decision iterations to expect: we simply check that each expected timestep has been completed. Parameters ---------- model_run_name : str Returns ------- set Set of tuples representing available results """ available_results = self.available_results(model_run_name) canonical_list = [] for t, d, model_name, output_name in available_results: canonical_list.append((t, 0, model_name, output_name)) # Return as a set to remove duplicates return set(canonical_list) def canonical_expected_results(self, model_run_name): """List the results that are expected from a model run, collapsing all decision iterations. For a complete model run, this would coincide with the unique list returned from `available_results`, where all decision iterations are set to 0. This method is used to determine whether a model run is complete, given that it is impossible to know how many decision iterations to expect: we simply check that each expected timestep has been completed. Parameters ---------- model_run_name : str Returns ------- set Set of tuples representing expected results """ # Model results are returned as a tuple # (timestep, decision_it, model_name, output_name) # so we first build the full list of expected results tuples. expected_results = [] # Get the sos model name given the model run name, and the full list of timesteps model_run = self.read_model_run(model_run_name) timesteps = sorted(model_run['timesteps']) sos_model_name = model_run['sos_model'] # Get the list of sector models in the sos model sos_config = self.read_sos_model(sos_model_name) # For each sector model, get the outputs and create the tuples for model_name in sos_config['sector_models']: model_config = self.read_model(model_name) outputs = model_config['outputs'] for output, t in itertools.product(outputs, timesteps): expected_results.append((t, 0, model_name, output['name'])) # Return as a set to remove duplicates return set(expected_results) def canonical_missing_results(self, model_run_name): """List the results that are missing from a model run, collapsing all decision iterations. For a complete model run, this is what is left after removing canonical_available_results from canonical_expected_results. Parameters ---------- model_run_name : str Returns ------- set Set of tuples representing missing results """ return self.canonical_expected_results( model_run_name) - self.canonical_available_results(model_run_name) def _get_result_darray_internal(self, model_run_name, model_name, output_name, time_decision_tuples): """Internal implementation for `get_result_darray`, after the unique list of (timestep, decision) tuples has been generated and validated. This method gets the spec for the output defined by the model_run_name, model_name and output_name and expands the spec to include an additional dimension for the list of tuples. Then, for each tuple, the data array from the corresponding read_results call is stacked, and together with the new spec this information is returned as a new DataArray. Parameters ---------- model_run_name : str model_name : str output_name : str time_decision_tuples : list of unique (timestep, decision) tuples Returns ------- DataArray with expanded spec and data for each (timestep, decision) tuple """ # Get the output spec given the name of the sector model and output output_spec = None model = self.read_model(model_name) for output in model['outputs']: # Ignore if the output name doesn't match if output_name != output['name']: continue output_spec = Spec.from_dict(output) assert output_spec, "Output name was not found in model outputs" # Read the results for each (timestep, decision) tuple and stack them list_of_numpy_arrays = [] for t, d in time_decision_tuples: d_array = self.read_results(model_run_name, model_name, output_spec, t, d) list_of_numpy_arrays.append(d_array.data) stacked_data = np.vstack(list_of_numpy_arrays) # Add new dimensions to the data spec output_dict = output_spec.as_dict() output_dict['dims'] = ['timestep_decision'] + output_dict['dims'] output_dict['coords']['timestep_decision'] = time_decision_tuples output_spec = Spec.from_dict(output_dict) # Create a new DataArray from the modified spec and stacked data return DataArray(output_spec, np.reshape(stacked_data, output_spec.shape)) def get_result_darray(self, model_run_name, model_name, output_name, timesteps=None, decision_iterations=None, time_decision_tuples=None): """Return data for multiple timesteps and decision iterations for a given output from a given sector model in a specific model run. You can specify either: a list of (timestep, decision) tuples in which case data for all of those tuples matching the available results will be returned or: a list of timesteps in which case data for all of those timesteps (and any decision iterations) matching the available results will be returned or: a list of decision iterations in which case data for all of those decision iterations (and any timesteps) matching the available results will be returned or: a list of timesteps and a list of decision iterations in which case data for the Cartesian product of those timesteps and those decision iterations matching the available results will be returned or: nothing in which case all available results will be returned Then, for each tuple, the data array from the corresponding read_results call is stacked, and together with the new spec this information is returned as a new DataArray. Parameters ---------- model_run_name : str model_name : str output_name : str timesteps : optional list of timesteps decision_iterations : optional list of decision iterations time_decision_tuples : optional list of unique (timestep, decision) tuples Returns ------- DataArray with expanded spec and the data requested """ available = self.available_results(model_run_name) # Build up the necessary list of tuples if not timesteps and not decision_iterations and not time_decision_tuples: list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name ] elif timesteps and not decision_iterations and not time_decision_tuples: list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name and t in timesteps ] elif decision_iterations and not timesteps and not time_decision_tuples: list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name and d in decision_iterations ] elif time_decision_tuples and not timesteps and not decision_iterations: list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name and (t, d) in time_decision_tuples ] elif timesteps and decision_iterations and not time_decision_tuples: t_d = list(itertools.product(timesteps, decision_iterations)) list_of_tuples = [ (t, d) for t, d, m, out in available if m == model_name and out == output_name and (t, d) in t_d ] else: msg = "Expected either timesteps, or decisions, or (timestep, decision) " + \ "tuples, or timesteps and decisions, or none of the above." raise ValueError(msg) if not list_of_tuples: raise SmifDataNotFoundError("None of the requested data is available.") return self._get_result_darray_internal( model_run_name, model_name, output_name, sorted(list_of_tuples) ) def get_results(self, model_run_names: list, model_name: str, output_names: list, timesteps: list = None, decisions: list = None, time_decision_tuples: list = None, ): """Return data for multiple timesteps and decision iterations for a given output from a given sector model for multiple model runs. Parameters ---------- model_run_names: list[str] the requested model run names model_name: str the requested sector model name output_names: list[str] the requested output names (output specs must all match) timesteps: list[int] the requested timesteps decisions: list[int] the requested decision iterations time_decision_tuples: list[tuple] a list of requested (timestep, decision) tuples Returns ------- dict Nested dictionary of DataArray objects, keyed on model run name and output name. Returned DataArrays include one extra (timestep, decision_iteration) dimension. """ # List the available output names and verify requested outputs match outputs = self.read_model(model_name)['outputs'] available_outputs = [output['name'] for output in outputs] for output_name in output_names: assert output_name in available_outputs, \ '{} is not an output of sector model {}.'.format(output_name, model_name) # The spec for each requested output must be the same. We check they have the same # coordinates coords = [Spec.from_dict(output).coords for output in outputs if output['name'] in output_names] for coord in coords: if coord != coords[0]: raise ValueError('Different outputs must have the same coordinates') # Now actually obtain the requested results results_dict = OrderedDict() # type: OrderedDict for model_run_name in model_run_names: results_dict[model_run_name] = OrderedDict() for output_name in output_names: results_dict[model_run_name][output_name] = self.get_result_darray( model_run_name, model_name, output_name, timesteps, decisions, time_decision_tuples ) return results_dict # endregion # region data store utilities def _key_from_data(self, path, *args): """Return path or generate a unique key for a given set of args """ if isinstance(self.data_store, (CSVDataStore, ParquetDataStore)): return path else: return tuple(args) # endregion def _pick_from_list(list_of_dicts, name): for item in list_of_dicts: if 'name' in item and item['name'] == name: return item return None
en
0.589108
The store provides a common data interface to smif configuration, data and metadata. Raises ------ SmifDataNotFoundError If data cannot be found in the store when try to read from the store SmifDataExistsError If data already exists in the store when trying to write to the store (use an update method instead) SmifDataMismatchError Data presented to read, write and update methods is in the incorrect format or of wrong dimensions to that expected SmifDataReadError When unable to read data e.g. unable to handle file type or connect to database # type: ignore Common interface to data store, composed of config, metadata and data store implementations. Parameters ---------- config_store: ~smif.data_layer.abstract_config_store.ConfigStore metadata_store: ~smif.data_layer.abstract_metadata_store.MetadataStore data_store: ~smif.data_layer.abstract_data_store.DataStore # base folder for any relative paths to models Create Store from configuration dict # Check that the directory is valid # # CONFIG # # region Model runs Read all system-of-system model runs Returns ------- list[~smif.controller.modelrun.ModelRun] Read a system-of-system model run Parameters ---------- model_run_name : str Returns ------- ~smif.controller.modelrun.ModelRun Write system-of-system model run Parameters ---------- model_run : ~smif.controller.modelrun.ModelRun Update system-of-system model run Parameters ---------- model_run_name : str model_run : ~smif.controller.modelrun.ModelRun Delete a system-of-system model run Parameters ---------- model_run_name : str # endregion # region System-of-systems models Read all system-of-system models Returns ------- list[~smif.model.sos_model.SosModel] Read a specific system-of-system model Parameters ---------- sos_model_name : str Returns ------- ~smif.model.sos_model.SosModel Write system-of-system model Parameters ---------- sos_model : ~smif.model.sos_model.SosModel Update system-of-system model Parameters ---------- sos_model_name : str sos_model : ~smif.model.sos_model.SosModel Delete a system-of-system model Parameters ---------- sos_model_name : str # endregion # region Models Read all models Returns ------- list[~smif.model.model.Model] Read a model Parameters ---------- model_name : str Returns ------- ~smif.model.model.Model Write a model Parameters ---------- model : ~smif.model.model.Model Update a model Parameters ---------- model_name : str model : ~smif.model.model.Model Delete a model Parameters ---------- model_name : str # endregion # region Scenarios Read scenarios Returns ------- list[~smif.model.ScenarioModel] Read a scenario Parameters ---------- scenario_name : str Returns ------- ~smif.model.ScenarioModel Write scenario Parameters ---------- scenario : ~smif.model.ScenarioModel Update scenario Parameters ---------- scenario_name : str scenario : ~smif.model.ScenarioModel Delete scenario from project configuration Parameters ---------- scenario_name : str Modify {scenario_name} config file to include multiple scenario variants. Parameters ---------- scenario_name: str list_of_variants: list[int] - indices of scenario variants # Check that template scenario file does not define more than one variant # Read variant defined in template scenario file # Read template names of scenario variant data files # output_name => (base, ext) # root is a dict. keyed on scenario outputs. # Entries contain the root of the variants filenames # Now modify scenario file # Copying the variant dict is required when underlying config_store # is an instance of MemoryConfigStore, which attribute _scenarios holds # a reference to the variant object passed to update or # write_scenario_variant Write multiple model run config files corresponding to multiple scenario variants of {scenario_name}, based on template {model_run_name} Write batchfile containing each of the generated model runs Parameters ---------- model_run_name: str scenario_name: str first_var: int - between 0 and number of variants-1 last_var: int - between first_var and number of variants-1 # read strategies from config store (Store.read_strategies pulls together data on # interventions as well, which we don't need here) # Open batchfile # For each variant model_run, write a new model run file with corresponding # scenario variant and update batchfile # Close batchfile # endregion # region Scenario Variants Read variants of a given scenario Parameters ---------- scenario_name : str Returns ------- list[dict] Read a scenario variant Parameters ---------- scenario_name : str variant_name : str Returns ------- dict Write scenario to project configuration Parameters ---------- scenario_name : str variant : dict Update scenario to project configuration Parameters ---------- scenario_name : str variant_name : str variant : dict Delete scenario from project configuration Parameters ---------- scenario_name : str variant_name : str # endregion # region Narratives Read narrative from sos_model Parameters ---------- sos_model_name : str narrative_name : str # endregion # region Strategies Read strategies for a given model run Parameters ---------- model_run_name : str Returns ------- list[dict] Write strategies for a given model_run Parameters ---------- model_run_name : str strategies : list[dict] # endregion # # METADATA # # region Units Reads custom unit definitions Returns ------- list[str] Pint-compatible unit definitions Reads custom unit definitions Parameters ---------- list[str] Pint-compatible unit definitions # endregion # region Dimensions Read dimensions Returns ------- list[~smif.metadata.coords.Coords] Return dimension Parameters ---------- dimension_name : str Returns ------- ~smif.metadata.coords.Coords A dimension definition (including elements) Write dimension to project configuration Parameters ---------- dimension : ~smif.metadata.coords.Coords Update dimension Parameters ---------- dimension_name : str dimension : ~smif.metadata.coords.Coords Delete dimension Parameters ---------- dimension_name : str Add coordinates to spec definitions on an object # endregion # # DATA # # region Scenario Variant Data Read scenario data file Parameters ---------- scenario_name : str variant_name : str variable : str timestep : int Returns ------- data : ~smif.data_layer.data_array.DataArray Write scenario data file Parameters ---------- scenario_name : str variant_name : str data : ~smif.data_layer.data_array.DataArray # Convert scenario data for model run # endregion # region Narrative Data Read narrative data file Parameters ---------- sos_model_name : str narrative_name : str variant_name : str parameter_name : str timestep : int (optional) If None, read data for all timesteps Returns ------- ~smif.data_layer.data_array.DataArray # find sector model which needs this parameter, to get spec definition # find spec Read narrative data file Parameters ---------- sos_model_name : str narrative_name : str variant_name : str data : ~smif.data_layer.data_array.DataArray Read default data for a sector model parameter Parameters ---------- model_name : str parameter_name : str Returns ------- ~smif.data_layer.data_array.DataArray Write default data for a sector model parameter Parameters ---------- model_name : str parameter_name : str data : ~smif.data_layer.data_array.DataArray # endregion # region Interventions Read interventions data for `model_name` Returns ------- dict[str, dict] A dict of intervention dictionaries containing intervention attributes keyed by intervention name Write interventions data for a model Parameters ---------- dict[str, dict] A dict of intervention dictionaries containing intervention attributes keyed by intervention name Read interventions as defined in a model run strategy Parameters ---------- list[dicts] Read historical interventions for `model_name` Returns ------- list[dict] A list of historical interventions, with keys 'name' and 'build_year' Write historical interventions for a model Parameters ---------- list[dict] A list of historical interventions, with keys 'name' and 'build_year' A list of all historical interventions Returns ------- list[dict] # type: List # endregion # region State Read list of (name, build_year) for a given model_run, timestep, decision Parameters ---------- model_run_name : str timestep : int decision_iteration : int, optional Returns ------- list[dict] State is a list of decisions with name and build_year. State is output from the DecisionManager Parameters ---------- state : list[dict] model_run_name : str timestep : int decision_iteration : int, optional # endregion # region Conversion coefficients Reads coefficients from the store Coefficients are uniquely identified by their source/destination dimensions. This method and `write_coefficients` implement caching of conversion coefficients between dimensions. Parameters ---------- source_dim : str Dimension name destination_dim : str Dimension name Returns ------- numpy.ndarray Notes ----- To be called from :class:`~smif.convert.adaptor.Adaptor` implementations. Writes coefficients to the store Coefficients are uniquely identified by their source/destination dimensions. This method and `read_coefficients` implement caching of conversion coefficients between dimensions. Parameters ---------- source_dim : str Dimension name destination_dim : str Dimension name data : numpy.ndarray Notes ----- To be called from :class:`~smif.convert.adaptor.Adaptor` implementations. # endregion # region Results Return results of a `model_name` in `model_run_name` for a given `output_name` Parameters ---------- model_run_name : str model_name : str output_spec : smif.metadata.Spec timestep : int, default=None decision_iteration : int, default=None Returns ------- ~smif.data_layer.data_array.DataArray Write results of a `model_name` in `model_run_name` for a given `output_name` Parameters ---------- data_array : ~smif.data_layer.data_array.DataArray model_run_id : str model_name : str timestep : int, optional decision_iteration : int, optional Delete results for a single timestep/iteration of a model output in a model run Parameters ---------- model_run_name : str model_name : str output_name : str timestep : int, default=None decision_iteration : int, default=None Clear all results from a single model run Parameters ---------- model_run_name : str List available results from a model run Parameters ---------- model_run_name : str Returns ------- list[tuple] Each tuple is (timestep, decision_iteration, model_name, output_name) List completed jobs from a model run Parameters ---------- model_run_name : str Returns ------- list[tuple] Each tuple is (timestep, decision_iteration, model_name) # {(t, d, model, output)} # [(model, output)] Filter available results from a model run to include only complete timestep/decision iteration combinations Parameters ---------- available_results: list[tuple] List of (timestep, decision_iteration, model_name, output_name) expected_model_outputs: list[tuple] List or set of (model_name, output_name) Returns ------- list[tuple] Each tuple is (timestep, decision_iteration, model_name) List expected model outputs from a model run Parameters ---------- model_run_name : str Returns ------- list[tuple] Each tuple is (model_name, output_name) # For each model, get the outputs and create (model_name, output_name) tuples Copy the results from the previous model_run if available The method allows a previous unsuccessful model_run to 'warm start' a new model run from a later timestep. Model results are recovered from the timestep that the previous model_run was run until, and the new model run runs from the returned timestep Parameters ---------- model_run_name : str Returns ------- int The timestep to which the data store was recovered Notes ----- Called from smif.controller.execute # could explicitly clear results for max timestep List the results that are available from a model run, collapsing all decision iterations. This is the unique items from calling `available_results`, with all decision iterations set to 0. This method is used to determine whether a model run is complete, given that it is impossible to know how many decision iterations to expect: we simply check that each expected timestep has been completed. Parameters ---------- model_run_name : str Returns ------- set Set of tuples representing available results # Return as a set to remove duplicates List the results that are expected from a model run, collapsing all decision iterations. For a complete model run, this would coincide with the unique list returned from `available_results`, where all decision iterations are set to 0. This method is used to determine whether a model run is complete, given that it is impossible to know how many decision iterations to expect: we simply check that each expected timestep has been completed. Parameters ---------- model_run_name : str Returns ------- set Set of tuples representing expected results # Model results are returned as a tuple # (timestep, decision_it, model_name, output_name) # so we first build the full list of expected results tuples. # Get the sos model name given the model run name, and the full list of timesteps # Get the list of sector models in the sos model # For each sector model, get the outputs and create the tuples # Return as a set to remove duplicates List the results that are missing from a model run, collapsing all decision iterations. For a complete model run, this is what is left after removing canonical_available_results from canonical_expected_results. Parameters ---------- model_run_name : str Returns ------- set Set of tuples representing missing results Internal implementation for `get_result_darray`, after the unique list of (timestep, decision) tuples has been generated and validated. This method gets the spec for the output defined by the model_run_name, model_name and output_name and expands the spec to include an additional dimension for the list of tuples. Then, for each tuple, the data array from the corresponding read_results call is stacked, and together with the new spec this information is returned as a new DataArray. Parameters ---------- model_run_name : str model_name : str output_name : str time_decision_tuples : list of unique (timestep, decision) tuples Returns ------- DataArray with expanded spec and data for each (timestep, decision) tuple # Get the output spec given the name of the sector model and output # Ignore if the output name doesn't match # Read the results for each (timestep, decision) tuple and stack them # Add new dimensions to the data spec # Create a new DataArray from the modified spec and stacked data Return data for multiple timesteps and decision iterations for a given output from a given sector model in a specific model run. You can specify either: a list of (timestep, decision) tuples in which case data for all of those tuples matching the available results will be returned or: a list of timesteps in which case data for all of those timesteps (and any decision iterations) matching the available results will be returned or: a list of decision iterations in which case data for all of those decision iterations (and any timesteps) matching the available results will be returned or: a list of timesteps and a list of decision iterations in which case data for the Cartesian product of those timesteps and those decision iterations matching the available results will be returned or: nothing in which case all available results will be returned Then, for each tuple, the data array from the corresponding read_results call is stacked, and together with the new spec this information is returned as a new DataArray. Parameters ---------- model_run_name : str model_name : str output_name : str timesteps : optional list of timesteps decision_iterations : optional list of decision iterations time_decision_tuples : optional list of unique (timestep, decision) tuples Returns ------- DataArray with expanded spec and the data requested # Build up the necessary list of tuples Return data for multiple timesteps and decision iterations for a given output from a given sector model for multiple model runs. Parameters ---------- model_run_names: list[str] the requested model run names model_name: str the requested sector model name output_names: list[str] the requested output names (output specs must all match) timesteps: list[int] the requested timesteps decisions: list[int] the requested decision iterations time_decision_tuples: list[tuple] a list of requested (timestep, decision) tuples Returns ------- dict Nested dictionary of DataArray objects, keyed on model run name and output name. Returned DataArrays include one extra (timestep, decision_iteration) dimension. # List the available output names and verify requested outputs match # The spec for each requested output must be the same. We check they have the same # coordinates # Now actually obtain the requested results # type: OrderedDict # endregion # region data store utilities Return path or generate a unique key for a given set of args # endregion
2.17254
2
april q1.py
Manthanc007/APS-2o2o
0
6617593
n=int(input()) l=list(map(int,input().split())) l=list(set(l)) l.sort(reverse=True) if(len(l)==1): print(0) else: print(l[1]%l[0])
n=int(input()) l=list(map(int,input().split())) l=list(set(l)) l.sort(reverse=True) if(len(l)==1): print(0) else: print(l[1]%l[0])
none
1
2.997751
3
loadit/log.py
alvarosanz/loadit
0
6617594
import sys import logging def add_handler(handler, logger=None, format='%(asctime)s - %(message)s', time_format='%Y-%m-%d %H:%M:%S', level=logging.INFO): handler.setLevel(level) handler.setFormatter(logging.Formatter(format, time_format)) logging.getLogger(logger).addHandler(handler) # root logger logging.getLogger().setLevel(logging.INFO) console_handler = logging.StreamHandler(sys.stdout) add_handler(console_handler, format='%(message)s') # central_server logger logging.getLogger('central_server').setLevel(logging.INFO) add_handler(logging.FileHandler('server.log'), logger='central_server') def enable_console(): logging.getLogger().addHandler(console_handler) def disable_console(): logging.getLogger().handlers.remove(console_handler) class ConnectionHandler(logging.StreamHandler): def __init__(self, connection): super().__init__() self.connection = connection def emit(self, record): if record.levelname == 'DEBUG': self.connection.send(record.getMessage(), 'debug_log') elif record.levelname == 'INFO': self.connection.send(record.getMessage(), 'info_log') elif record.levelname == 'WARNING': self.connection.send(record.getMessage(), 'warning_log') elif record.levelname == 'ERROR': self.connection.send(record.getMessage(), 'error_log') elif record.levelname == 'CRITICAL': self.connection.send(record.getMessage(), 'critical_log') _is_file_handler = False def add_file_handler(file=None): global _is_file_handler if not _is_file_handler: if not file: file = 'loadit.log' add_handler(logging.FileHandler(file)) def custom_logging(func): def wrapped(self, *args, **kwargs): try: logging.getLogger().addHandler(self.log) return func(self, *args, **kwargs) finally: logging.getLogger().handlers.remove(self.log) return wrapped
import sys import logging def add_handler(handler, logger=None, format='%(asctime)s - %(message)s', time_format='%Y-%m-%d %H:%M:%S', level=logging.INFO): handler.setLevel(level) handler.setFormatter(logging.Formatter(format, time_format)) logging.getLogger(logger).addHandler(handler) # root logger logging.getLogger().setLevel(logging.INFO) console_handler = logging.StreamHandler(sys.stdout) add_handler(console_handler, format='%(message)s') # central_server logger logging.getLogger('central_server').setLevel(logging.INFO) add_handler(logging.FileHandler('server.log'), logger='central_server') def enable_console(): logging.getLogger().addHandler(console_handler) def disable_console(): logging.getLogger().handlers.remove(console_handler) class ConnectionHandler(logging.StreamHandler): def __init__(self, connection): super().__init__() self.connection = connection def emit(self, record): if record.levelname == 'DEBUG': self.connection.send(record.getMessage(), 'debug_log') elif record.levelname == 'INFO': self.connection.send(record.getMessage(), 'info_log') elif record.levelname == 'WARNING': self.connection.send(record.getMessage(), 'warning_log') elif record.levelname == 'ERROR': self.connection.send(record.getMessage(), 'error_log') elif record.levelname == 'CRITICAL': self.connection.send(record.getMessage(), 'critical_log') _is_file_handler = False def add_file_handler(file=None): global _is_file_handler if not _is_file_handler: if not file: file = 'loadit.log' add_handler(logging.FileHandler(file)) def custom_logging(func): def wrapped(self, *args, **kwargs): try: logging.getLogger().addHandler(self.log) return func(self, *args, **kwargs) finally: logging.getLogger().handlers.remove(self.log) return wrapped
en
0.366014
# root logger # central_server logger
2.447516
2
data/audio/build_audio_database.py
SutirthaChakraborty/speech_separation
5
6617595
import sys sys.path.append("../../model/lib") import os import librosa import numpy as np import utils import operator import itertools import time # Parameter SAMPLE_RANGE = (0,350) # data usage to generate database TEST_RANGE = (350,550) # data usage to generate database REPO_PATH = os.path.expanduser("./audio_train") TRAIN = 1 TEST = 0 # time measure decorator def timit(func): def cal_time(*args,**kwargs): tic = time.time() result = func(*args,**kwargs) tac = time.time() print(func.__name__,'running time: ',(tac-tic),'ms') return result return cal_time # create directory to store database def init_dir(): if not os.path.isdir('./audio_database'): os.mkdir('./audio_database') if not os.path.isdir('./audio_database/mix'): os.mkdir('./audio_database/mix') if not os.path.isdir('./audio_database/single'): os.mkdir('./audio_database/single') if not os.path.isdir('./audio_database/crm'): os.mkdir('./audio_database/crm') def generate_path_list(sample_range=SAMPLE_RANGE,repo_path=REPO_PATH): ''' :param sample_range: :param repo_path: :return: 2D array with idx and path ''' audio_path_list = [] for i in range(sample_range[0],sample_range[1]): path = repo_path + '/trim_audio_train%d.wav'%i if os.path.exists(path): audio_path_list.append((i,path)) print('length of the path list: ',len(audio_path_list)) return audio_path_list # data generate function def single_audio_to_npy(audio_path_list,fix_sr=16000): for idx,path in audio_path_list: data, _ = librosa.load(path, sr=fix_sr) data = utils.fast_stft(data) name = 'single-%05d'%idx np.save(('audio_database/single/%s.npy'%name),data) def generate_mix_sample(audio_path_list,num_speaker,fix_sr=16000,verbose=0): ''' generate mix sample from audios in the list :param audio_path_list: list contains path of the wav audio file :param num_speaker: specify the task for speech separation :param fix_sr: fix sample rate ''' # initiate variables # shape of F_mix = (298,257,2) # shpae of crm = (298,257,2) data_list = [] F_list = [] # STFT list for each sample cRM_list = [] mix_name = "mix" crm_name = "crm" post_name = "" # import data for i in range(num_speaker): idx,path =audio_path_list[i] post_name += "-%05d"%idx data, _ = librosa.load(path,sr=fix_sr) data_list.append(data) # create mix audio according to mix rate mix_rate = 1.0 / float(num_speaker) mix = np.zeros(shape=data_list[0].shape) for data in data_list: mix += data*mix_rate # transfrom data via STFT and several preprocessing function for i in range(num_speaker): F = utils.fast_stft(data_list[i],power=False) F_list.append(F) F_mix = utils.fast_stft(mix,power=False) # create cRM for each speaker and fill into y_sample for i in range(num_speaker): cRM_list.append(utils.fast_cRM(F_list[i],F_mix)) # return values if verbose == 1: print('shape of X: ',F_mix.shape) for i in range(len(cRM_list)): print('shape of cRM%s :'%i,cRM_list[i].shape) # save record in txt mix_name += post_name crm_name += post_name # write txt with open('audio_database/dataset.txt','a') as f: f.write(mix_name+".npy") for i in range(len(cRM_list)): line = " " + crm_name + ("-%05d"%audio_path_list[i][0]) + ".npy" f.write(line) f.write("\n") # save file as npy np.save(('audio_database/mix/%s.npy'%mix_name), F_mix) for i in range(len(cRM_list)): name = crm_name + ("-%05d"%audio_path_list[i][0]) np.save(('audio_database/crm/%s.npy'%name), cRM_list[i]) @timit def generate_dataset(sample_range,repo_path,num_speaker=2): ''' A function to generate dataset :param sample_range: range of the sample to create the dataset :param repo_path: audio repository :param num_speaker: number of speaker to separate :return: X_data, y_data ''' audio_path_list = generate_path_list(sample_range,repo_path) num_data = 0 combinations = itertools.combinations(audio_path_list,num_speaker) for combo in combinations: num_data += 1 generate_mix_sample(combo,num_speaker) print('number of the data generated: ',num_data) @timit def train_test_split(num_start = 0,num_data = 50000,database_txt_path="audio_database/dataset.txt", train_txt_path="audio_database/dataset_train.txt",test_txt_path="audio_database/dataset_val.txt",test_rate=0.01): step = 1 // test_rate count = 0 with open(database_txt_path,'r') as f: for i in range(num_start): _ = f.read() train_txt = open(train_txt_path,'a') test_txt = open(test_txt_path,'a') for line in f: if count > num_data : break count+=1 if count%step==0: test_txt.write(line) continue train_txt.write(line) train_txt.close() test_txt.close() @timit def create_testset(num_start = 53400,num_data=12375,database_txt_path="audio_database/dataset.txt",test_txt_path="audio_database/dataset_test.txt"): count = 0 with open(database_txt_path,'r') as f: test_txt = open(test_txt_path, 'a') for i in range(num_start): f.readline() for line in f: if count > num_data: break count += 1 test_txt.write(line) test_txt.close() ##################################### # create directory init_dir() ## generate training dataset if TRAIN: # generate path audio_path_list = generate_path_list() # generate single data single_audio_to_npy = single_audio_to_npy(audio_path_list) # generate mix data generate_dataset(sample_range=SAMPLE_RANGE,repo_path=REPO_PATH) # split train and test txt instructor file train_test_split() ## generate testing dataset if TEST: audio_path_list = generate_path_list(sample_range=TEST_RANGE,repo_path=REPO_PATH) single_audio_to_npy(audio_path_list) generate_dataset(sample_range=TEST_RANGE, repo_path=REPO_PATH) create_testset()
import sys sys.path.append("../../model/lib") import os import librosa import numpy as np import utils import operator import itertools import time # Parameter SAMPLE_RANGE = (0,350) # data usage to generate database TEST_RANGE = (350,550) # data usage to generate database REPO_PATH = os.path.expanduser("./audio_train") TRAIN = 1 TEST = 0 # time measure decorator def timit(func): def cal_time(*args,**kwargs): tic = time.time() result = func(*args,**kwargs) tac = time.time() print(func.__name__,'running time: ',(tac-tic),'ms') return result return cal_time # create directory to store database def init_dir(): if not os.path.isdir('./audio_database'): os.mkdir('./audio_database') if not os.path.isdir('./audio_database/mix'): os.mkdir('./audio_database/mix') if not os.path.isdir('./audio_database/single'): os.mkdir('./audio_database/single') if not os.path.isdir('./audio_database/crm'): os.mkdir('./audio_database/crm') def generate_path_list(sample_range=SAMPLE_RANGE,repo_path=REPO_PATH): ''' :param sample_range: :param repo_path: :return: 2D array with idx and path ''' audio_path_list = [] for i in range(sample_range[0],sample_range[1]): path = repo_path + '/trim_audio_train%d.wav'%i if os.path.exists(path): audio_path_list.append((i,path)) print('length of the path list: ',len(audio_path_list)) return audio_path_list # data generate function def single_audio_to_npy(audio_path_list,fix_sr=16000): for idx,path in audio_path_list: data, _ = librosa.load(path, sr=fix_sr) data = utils.fast_stft(data) name = 'single-%05d'%idx np.save(('audio_database/single/%s.npy'%name),data) def generate_mix_sample(audio_path_list,num_speaker,fix_sr=16000,verbose=0): ''' generate mix sample from audios in the list :param audio_path_list: list contains path of the wav audio file :param num_speaker: specify the task for speech separation :param fix_sr: fix sample rate ''' # initiate variables # shape of F_mix = (298,257,2) # shpae of crm = (298,257,2) data_list = [] F_list = [] # STFT list for each sample cRM_list = [] mix_name = "mix" crm_name = "crm" post_name = "" # import data for i in range(num_speaker): idx,path =audio_path_list[i] post_name += "-%05d"%idx data, _ = librosa.load(path,sr=fix_sr) data_list.append(data) # create mix audio according to mix rate mix_rate = 1.0 / float(num_speaker) mix = np.zeros(shape=data_list[0].shape) for data in data_list: mix += data*mix_rate # transfrom data via STFT and several preprocessing function for i in range(num_speaker): F = utils.fast_stft(data_list[i],power=False) F_list.append(F) F_mix = utils.fast_stft(mix,power=False) # create cRM for each speaker and fill into y_sample for i in range(num_speaker): cRM_list.append(utils.fast_cRM(F_list[i],F_mix)) # return values if verbose == 1: print('shape of X: ',F_mix.shape) for i in range(len(cRM_list)): print('shape of cRM%s :'%i,cRM_list[i].shape) # save record in txt mix_name += post_name crm_name += post_name # write txt with open('audio_database/dataset.txt','a') as f: f.write(mix_name+".npy") for i in range(len(cRM_list)): line = " " + crm_name + ("-%05d"%audio_path_list[i][0]) + ".npy" f.write(line) f.write("\n") # save file as npy np.save(('audio_database/mix/%s.npy'%mix_name), F_mix) for i in range(len(cRM_list)): name = crm_name + ("-%05d"%audio_path_list[i][0]) np.save(('audio_database/crm/%s.npy'%name), cRM_list[i]) @timit def generate_dataset(sample_range,repo_path,num_speaker=2): ''' A function to generate dataset :param sample_range: range of the sample to create the dataset :param repo_path: audio repository :param num_speaker: number of speaker to separate :return: X_data, y_data ''' audio_path_list = generate_path_list(sample_range,repo_path) num_data = 0 combinations = itertools.combinations(audio_path_list,num_speaker) for combo in combinations: num_data += 1 generate_mix_sample(combo,num_speaker) print('number of the data generated: ',num_data) @timit def train_test_split(num_start = 0,num_data = 50000,database_txt_path="audio_database/dataset.txt", train_txt_path="audio_database/dataset_train.txt",test_txt_path="audio_database/dataset_val.txt",test_rate=0.01): step = 1 // test_rate count = 0 with open(database_txt_path,'r') as f: for i in range(num_start): _ = f.read() train_txt = open(train_txt_path,'a') test_txt = open(test_txt_path,'a') for line in f: if count > num_data : break count+=1 if count%step==0: test_txt.write(line) continue train_txt.write(line) train_txt.close() test_txt.close() @timit def create_testset(num_start = 53400,num_data=12375,database_txt_path="audio_database/dataset.txt",test_txt_path="audio_database/dataset_test.txt"): count = 0 with open(database_txt_path,'r') as f: test_txt = open(test_txt_path, 'a') for i in range(num_start): f.readline() for line in f: if count > num_data: break count += 1 test_txt.write(line) test_txt.close() ##################################### # create directory init_dir() ## generate training dataset if TRAIN: # generate path audio_path_list = generate_path_list() # generate single data single_audio_to_npy = single_audio_to_npy(audio_path_list) # generate mix data generate_dataset(sample_range=SAMPLE_RANGE,repo_path=REPO_PATH) # split train and test txt instructor file train_test_split() ## generate testing dataset if TEST: audio_path_list = generate_path_list(sample_range=TEST_RANGE,repo_path=REPO_PATH) single_audio_to_npy(audio_path_list) generate_dataset(sample_range=TEST_RANGE, repo_path=REPO_PATH) create_testset()
en
0.60013
# Parameter # data usage to generate database # data usage to generate database # time measure decorator # create directory to store database :param sample_range: :param repo_path: :return: 2D array with idx and path # data generate function generate mix sample from audios in the list :param audio_path_list: list contains path of the wav audio file :param num_speaker: specify the task for speech separation :param fix_sr: fix sample rate # initiate variables # shape of F_mix = (298,257,2) # shpae of crm = (298,257,2) # STFT list for each sample # import data # create mix audio according to mix rate # transfrom data via STFT and several preprocessing function # create cRM for each speaker and fill into y_sample # return values # save record in txt # write txt # save file as npy A function to generate dataset :param sample_range: range of the sample to create the dataset :param repo_path: audio repository :param num_speaker: number of speaker to separate :return: X_data, y_data ##################################### # create directory ## generate training dataset # generate path # generate single data # generate mix data # split train and test txt instructor file ## generate testing dataset
2.195486
2
social/backends/qq.py
raccoongang/python-social-auth
1,987
6617596
<reponame>raccoongang/python-social-auth from social_core.backends.qq import QQOAuth2
from social_core.backends.qq import QQOAuth2
none
1
1.08554
1
datasets/generate/pack-hdf5.py
patharanordev/novel-view-synthesis
0
6617597
<gh_stars>0 import os import h5py import ipyplot import numpy as np from PIL import Image data = None base_path = "specific/images/" filename = "specific/data_chair.hdf5" # clear the old hdf5 if os.path.exists(filename): os.remove(filename) h5 = h5py.File(filename, "a") for i in os.listdir(base_path): img_path = os.path.join(base_path, i) img_name = i.split('.')[0] img_pose = img_name.split('_')[1:] img_pose = [int(img_pose[0]), int(img_pose[1])] # print('image:', img_name) # print('pose:', img_pose) # create group by image name img_group = h5.create_group(img_name) # add image img_group.create_dataset('image', data=np.asarray(Image.open(img_path))) # add pose img_group.create_dataset('pose', data=img_pose) h5.close()
import os import h5py import ipyplot import numpy as np from PIL import Image data = None base_path = "specific/images/" filename = "specific/data_chair.hdf5" # clear the old hdf5 if os.path.exists(filename): os.remove(filename) h5 = h5py.File(filename, "a") for i in os.listdir(base_path): img_path = os.path.join(base_path, i) img_name = i.split('.')[0] img_pose = img_name.split('_')[1:] img_pose = [int(img_pose[0]), int(img_pose[1])] # print('image:', img_name) # print('pose:', img_pose) # create group by image name img_group = h5.create_group(img_name) # add image img_group.create_dataset('image', data=np.asarray(Image.open(img_path))) # add pose img_group.create_dataset('pose', data=img_pose) h5.close()
en
0.586645
# clear the old hdf5 # print('image:', img_name) # print('pose:', img_pose) # create group by image name # add image # add pose
2.547083
3
OCR/businesscard/preprocess/gen_list.py
cvtower/Synthhub_AI
2
6617598
import os import cv2 import shutil src_path = './train_images/' imagelist = os.listdir(src_path) f=open("train_list.txt", "a+") for imgname in imagelist: if(imgname.endswith(".jpg")): print(imgname) result_str = imgname+'\n' f.write(result_str)
import os import cv2 import shutil src_path = './train_images/' imagelist = os.listdir(src_path) f=open("train_list.txt", "a+") for imgname in imagelist: if(imgname.endswith(".jpg")): print(imgname) result_str = imgname+'\n' f.write(result_str)
none
1
2.674595
3
code-211.py
cpermuttacth/BornToDev
0
6617599
a = input() while len(list(a)) > 1: a = str(sum([int(x) for x in list(a)])) print(a)
a = input() while len(list(a)) > 1: a = str(sum([int(x) for x in list(a)])) print(a)
none
1
3.31694
3
src/realm/utils/filters/ignore.py
orlevii/realm
3
6617600
<reponame>orlevii/realm<gh_stars>1-10 import fnmatch from typing import List from realm.entities import RealmContext class IgnoreFilter: @classmethod def apply(cls, ctx: RealmContext, ignores: List[str]): if not ignores: return filtered = [] for ignore in ignores: f = {p for p in ctx.projects if not fnmatch.fnmatch(p.name, ignore)} filtered.append(f) ctx.projects = cls._intersect(filtered) @classmethod def _intersect(cls, filtered): result = filtered[0] for f in filtered: result = result.intersection(f) return list(result)
import fnmatch from typing import List from realm.entities import RealmContext class IgnoreFilter: @classmethod def apply(cls, ctx: RealmContext, ignores: List[str]): if not ignores: return filtered = [] for ignore in ignores: f = {p for p in ctx.projects if not fnmatch.fnmatch(p.name, ignore)} filtered.append(f) ctx.projects = cls._intersect(filtered) @classmethod def _intersect(cls, filtered): result = filtered[0] for f in filtered: result = result.intersection(f) return list(result)
none
1
2.259501
2
ic_shop/migrations/0011_auto_20191112_1501.py
hellohufan/beautyServer
0
6617601
<filename>ic_shop/migrations/0011_auto_20191112_1501.py<gh_stars>0 # -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2019-11-12 15:01 from __future__ import unicode_literals import django.core.validators from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('ic_shop', '0010_auto_20191111_1348'), ] operations = [ migrations.CreateModel( name='ReportOverview', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('score', models.CharField(default='0', max_length=20, null=True, verbose_name='综合得分')), ('skinType', models.IntegerField(default=-1, validators=[django.core.validators.MaxValueValidator(3), django.core.validators.MinValueValidator(-1)], verbose_name='皮肤类型, 0-干性皮肤, 1-油性皮肤, 2-中性皮肤, 3-混合型皮肤')), ('skinAge', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(0)], verbose_name='皮肤年龄, 15-70')), ('opinion', models.CharField(default='', max_length=1024, null=True, verbose_name='理疗师综合意见')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ic_shop.Customer', verbose_name='所属用户')), ], options={ 'verbose_name': '报告总览', 'verbose_name_plural': '报告总览', }, ), migrations.CreateModel( name='ReportOverviewShopItem', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('overView', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='overView', to='ic_shop.ReportOverview', verbose_name='报告总览')), ('shopItem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='shoItem', to='ic_shop.ShopItem', verbose_name='美容商品')), ], ), migrations.AddField( model_name='reportoverview', name='shopItem', field=models.ManyToManyField(through='ic_shop.ReportOverviewShopItem', to='ic_shop.ShopItem', verbose_name='推荐美容产品'), ), migrations.AlterUniqueTogether( name='reportoverviewshopitem', unique_together=set([('overView', 'shopItem')]), ), ]
<filename>ic_shop/migrations/0011_auto_20191112_1501.py<gh_stars>0 # -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2019-11-12 15:01 from __future__ import unicode_literals import django.core.validators from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('ic_shop', '0010_auto_20191111_1348'), ] operations = [ migrations.CreateModel( name='ReportOverview', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('score', models.CharField(default='0', max_length=20, null=True, verbose_name='综合得分')), ('skinType', models.IntegerField(default=-1, validators=[django.core.validators.MaxValueValidator(3), django.core.validators.MinValueValidator(-1)], verbose_name='皮肤类型, 0-干性皮肤, 1-油性皮肤, 2-中性皮肤, 3-混合型皮肤')), ('skinAge', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(0)], verbose_name='皮肤年龄, 15-70')), ('opinion', models.CharField(default='', max_length=1024, null=True, verbose_name='理疗师综合意见')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ic_shop.Customer', verbose_name='所属用户')), ], options={ 'verbose_name': '报告总览', 'verbose_name_plural': '报告总览', }, ), migrations.CreateModel( name='ReportOverviewShopItem', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('overView', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='overView', to='ic_shop.ReportOverview', verbose_name='报告总览')), ('shopItem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='shoItem', to='ic_shop.ShopItem', verbose_name='美容商品')), ], ), migrations.AddField( model_name='reportoverview', name='shopItem', field=models.ManyToManyField(through='ic_shop.ReportOverviewShopItem', to='ic_shop.ShopItem', verbose_name='推荐美容产品'), ), migrations.AlterUniqueTogether( name='reportoverviewshopitem', unique_together=set([('overView', 'shopItem')]), ), ]
en
0.714314
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2019-11-12 15:01
1.551645
2
data/tools/python/src/inaira/data/model_creation.py
stfc-aeg/odin-inaira
0
6617602
import logging import click import sys import yaml import os import tensorflow as tf class ModelCreator(): def __init__(self, config_file): self.config = ModelCreatorConfig(config_file) self.model = None self.logger = logging.getLogger('model_creation') ch = logging. StreamHandler(sys.stdout) log_format = "%(asctime)s.%(msecs)03d %(levelname)-5s %(name)s - %(message)s" formatter = logging.Formatter(fmt=log_format, datefmt="%d/%m/%y %H:%M:%S") ch.setFormatter(formatter) self.logger.addHandler(ch) ch.setLevel("DEBUG") self.logger.setLevel("DEBUG") self.logger.debug("Model Creation Init") def create_model(self): self.logger.debug("Creating Model") preprocessing_layers = [ tf.keras.layers.experimental.preprocessing.Resizing( self.config.image_height, self.config.image_width, input_shape=self.config.image_shape ), tf.keras.layers.experimental.preprocessing.Rescaling(1./255) ] self.logger.debug("Preprocessing Layers completed") core_layers = self.conv_2d_pooling_layers(16, self.config.number_colour_layers) self.logger.debug("Core Layers Complete") dense_layers = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(self.config.num_classes) ] self.logger.debug("Dense Layers Completed") self.model = tf.keras.Sequential( preprocessing_layers + core_layers + dense_layers ) self.logger.debug("Model Created") if self.config.include_training: self.train_model() def train_model(self): self.logger.debug("Training Model (not currently implemented)") pass # not currently implemented def save_model(self): self.logger.debug("Saving Model") self.model.save(self.config.model_save_location) def conv_2d_pooling_layers(self, filters, number_colour_layers): return [ tf.keras.layers.Conv2D( filters, number_colour_layers, padding="same", activation='relu' ), tf.keras.layers.MaxPooling2D() ] class ModelCreatorConfig(): def __init__(self, config_file): self.number_colour_layers = 1 self.input_width = 2000 self.input_height = 1800 self.image_width = 2000 self.image_height = 1800 self.image_size = (self.input_width, self.input_height) self.num_classes = 2 self.model_save_location = "tf-model" self.include_training = False if config_file is not None: self.parse_file(config_file) def parse_file(self, file_name): with open(file_name) as config_file: config = yaml.safe_load(config_file) for(key, value) in config.items(): setattr(self, key, value) self.image_size = (self.input_width, self.input_height) self.image_shape = self.image_size + (self.number_colour_layers,) @click.command() @click.option('--config', help="The path to the required yml config file.") def main(config): model = ModelCreator(config) model.create_model() model.save_model() if __name__ == "__main__": main()
import logging import click import sys import yaml import os import tensorflow as tf class ModelCreator(): def __init__(self, config_file): self.config = ModelCreatorConfig(config_file) self.model = None self.logger = logging.getLogger('model_creation') ch = logging. StreamHandler(sys.stdout) log_format = "%(asctime)s.%(msecs)03d %(levelname)-5s %(name)s - %(message)s" formatter = logging.Formatter(fmt=log_format, datefmt="%d/%m/%y %H:%M:%S") ch.setFormatter(formatter) self.logger.addHandler(ch) ch.setLevel("DEBUG") self.logger.setLevel("DEBUG") self.logger.debug("Model Creation Init") def create_model(self): self.logger.debug("Creating Model") preprocessing_layers = [ tf.keras.layers.experimental.preprocessing.Resizing( self.config.image_height, self.config.image_width, input_shape=self.config.image_shape ), tf.keras.layers.experimental.preprocessing.Rescaling(1./255) ] self.logger.debug("Preprocessing Layers completed") core_layers = self.conv_2d_pooling_layers(16, self.config.number_colour_layers) self.logger.debug("Core Layers Complete") dense_layers = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(self.config.num_classes) ] self.logger.debug("Dense Layers Completed") self.model = tf.keras.Sequential( preprocessing_layers + core_layers + dense_layers ) self.logger.debug("Model Created") if self.config.include_training: self.train_model() def train_model(self): self.logger.debug("Training Model (not currently implemented)") pass # not currently implemented def save_model(self): self.logger.debug("Saving Model") self.model.save(self.config.model_save_location) def conv_2d_pooling_layers(self, filters, number_colour_layers): return [ tf.keras.layers.Conv2D( filters, number_colour_layers, padding="same", activation='relu' ), tf.keras.layers.MaxPooling2D() ] class ModelCreatorConfig(): def __init__(self, config_file): self.number_colour_layers = 1 self.input_width = 2000 self.input_height = 1800 self.image_width = 2000 self.image_height = 1800 self.image_size = (self.input_width, self.input_height) self.num_classes = 2 self.model_save_location = "tf-model" self.include_training = False if config_file is not None: self.parse_file(config_file) def parse_file(self, file_name): with open(file_name) as config_file: config = yaml.safe_load(config_file) for(key, value) in config.items(): setattr(self, key, value) self.image_size = (self.input_width, self.input_height) self.image_shape = self.image_size + (self.number_colour_layers,) @click.command() @click.option('--config', help="The path to the required yml config file.") def main(config): model = ModelCreator(config) model.create_model() model.save_model() if __name__ == "__main__": main()
en
0.952823
# not currently implemented
2.537616
3
Webapp/swagger_server/controllers/process_controller.py
spilioeve/WM-src
2
6617603
import connexion import six from swagger_server.models.process_response import ProcessResponse # noqa: E501 from swagger_server.models.text import Text # noqa: E501 from swagger_server.models.text_query import TextQuery # noqa: E501 from swagger_server import util from swagger_server.sofia_functions import _process_text, _process_query from swagger_server.security import requires_auth @requires_auth def process_query(body): # noqa: E501 """Submit text and queries for query-based reading Submit an object containing the key &#x60;text&#x60; whose value is the text to be processed by SOFIA. The object should also contain the key &#x60;query&#x60; which should be an array of queries to be used for query-based reading. # noqa: E501 :param body: An object containing &#x60;text&#x60; and &#x60;query&#x60;. :type body: dict | bytes :rtype: ProcessResponse """ if connexion.request.is_json: body = TextQuery.from_dict(connexion.request.get_json()) # noqa: E501 resp = _process_query(body.text, body.query) return resp @requires_auth def process_text(body): # noqa: E501 """Submit text for reading Submit an object containing the key &#x60;text&#x60; whose value is the text to be processed by SOFIA. # noqa: E501 :param body: A &#x60;text&#x60; object. :type body: dict | bytes :rtype: ProcessResponse """ if connexion.request.is_json: body = Text.from_dict(connexion.request.get_json()) # noqa: E501 resp = _process_text(body.text) return resp
import connexion import six from swagger_server.models.process_response import ProcessResponse # noqa: E501 from swagger_server.models.text import Text # noqa: E501 from swagger_server.models.text_query import TextQuery # noqa: E501 from swagger_server import util from swagger_server.sofia_functions import _process_text, _process_query from swagger_server.security import requires_auth @requires_auth def process_query(body): # noqa: E501 """Submit text and queries for query-based reading Submit an object containing the key &#x60;text&#x60; whose value is the text to be processed by SOFIA. The object should also contain the key &#x60;query&#x60; which should be an array of queries to be used for query-based reading. # noqa: E501 :param body: An object containing &#x60;text&#x60; and &#x60;query&#x60;. :type body: dict | bytes :rtype: ProcessResponse """ if connexion.request.is_json: body = TextQuery.from_dict(connexion.request.get_json()) # noqa: E501 resp = _process_query(body.text, body.query) return resp @requires_auth def process_text(body): # noqa: E501 """Submit text for reading Submit an object containing the key &#x60;text&#x60; whose value is the text to be processed by SOFIA. # noqa: E501 :param body: A &#x60;text&#x60; object. :type body: dict | bytes :rtype: ProcessResponse """ if connexion.request.is_json: body = Text.from_dict(connexion.request.get_json()) # noqa: E501 resp = _process_text(body.text) return resp
en
0.704819
# noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 Submit text and queries for query-based reading Submit an object containing the key &#x60;text&#x60; whose value is the text to be processed by SOFIA. The object should also contain the key &#x60;query&#x60; which should be an array of queries to be used for query-based reading. # noqa: E501 :param body: An object containing &#x60;text&#x60; and &#x60;query&#x60;. :type body: dict | bytes :rtype: ProcessResponse # noqa: E501 # noqa: E501 Submit text for reading Submit an object containing the key &#x60;text&#x60; whose value is the text to be processed by SOFIA. # noqa: E501 :param body: A &#x60;text&#x60; object. :type body: dict | bytes :rtype: ProcessResponse # noqa: E501
2.432328
2
python/tests/conftest.py
dune-mirrors/dune-python
0
6617604
import pytest @pytest.fixture def dir(): import os return os.path.dirname(os.path.abspath(__file__)) + "/"
import pytest @pytest.fixture def dir(): import os return os.path.dirname(os.path.abspath(__file__)) + "/"
none
1
2.012254
2
cabin/io.py
seeqbio/cabin
1
6617605
<filename>cabin/io.py import os import csv import gzip import subprocess from lxml import etree from ftplib import FTP from dateutil import parser from pathlib import Path from . import logger, settings, CabinError def read_xsv(path, delimiter='\t', columns=None, header_leading_hash=True, ignore_leading_hash=False, gzipped=False, encoding=None): """ Parses a delimiter separated text file and yields rows as dictionaries. Args: delimiter (str): column delimiter. columns (list|None): If a list is given it is assumed that all lines of the source file are content lines (yielded as rows). If None it is expected that the first line of the file (the "header line") defines the column names. header_leading_hash (bool): Whether the header line has a leading `#`; ignored if `columns` is given. ignore_leading_hash: ignores lines with leading # from file contents. gzipped (bool): Whether the given file is gzipped. """ path = str(path) f = gzip.open(path, 'rt') if gzipped else open(path, 'r', encoding=encoding) logger.info('reading records from "{p}"'.format(p=f.name)) if columns is None: header = f.readline().strip() if header_leading_hash: if header[0] != '#': raise CabinError('Expected first line to start with #') header = header[1:] columns = header.split(delimiter) for line in f: if ignore_leading_hash and line.startswith('#'): continue values = line.strip().split(delimiter) yield dict(zip(columns, values)) f.close() def read_csv(path, delimiter=',', quotechar='"', encoding=None): """ Uses csv library to parse csv in the event that simple delimiter split is not enough. For example, some cells contain a citation, with a ',' in it, that is in quotes. eg: "Flex E, et al. Somatically acquired JAK1". """ with open(path, encoding=encoding) as infile: csv_content = csv.reader(infile, delimiter=delimiter, quotechar=quotechar) header = next(csv_content) for row in csv_content: yield dict(zip(header, row)) def read_vcf(path): path = str(path) logger.info('reading VCF records from "{p}"'.format(p=path)) vf = pysam.VariantFile(path) for variant in vf.fetch(): row = { 'CHROM': variant.chrom, 'POS': variant.pos, 'ID': variant.id, 'REF': variant.ref, 'ALT': variant.alts, # tuple: ('A',) 'QUAL': variant.qual, 'FILTER': variant.filter, 'INFO': variant.info, # eg usage: info.get('CLNDISDB') } yield row # A callback to free the memory used by elements retrieved by etree.iterparse # https://stackoverflow.com/a/12161078 # https://www.ibm.com/developerworks/xml/library/x-hiperfparse/ def xml_element_clear_memory(elem): elem.clear() # Also eliminate now-empty references from the root node to elem, # otherwise these dangling elements will swallow a lot of memory! for ancestor in elem.xpath('ancestor-or-self::*'): while ancestor.getprevious() is not None: del ancestor.getparent()[0] def xml_element_to_string(elem): return etree.tostring(elem, pretty_print=True, encoding='unicode') def read_xml(source, tag): """source is either path to a plain text XML file or a file object that reads in bytes.""" # use an incremental parser instead of loading the entire DOM in memory # NOTE users must clear the memory used by subelements retrieved via xpath # or the like after use. for _, elem in etree.iterparse(source, tag=tag): yield elem def read_obo(path): """ For each term in ontology, yields the name and id of the the term, the immediate children term, and immediate parent terms. All xrefs are kept and user can parse for a subset by prefix. Source: obo format. """ from pronto import Ontology # FIXME: UnicodeWarning: unsound encoding, assuming ISO-8859-1 (73% confidence) # with hpo import, not mondo or disease ontology ontology = Ontology(str(path)) for term in ontology.terms(): children = [child.id for child in ontology[term.id].subclasses(distance=1, with_self=False)] parents = [parent.id for parent in ontology[term.id].superclasses(distance=1, with_self=False)] yield { '_term': term, 'name': term.name, 'id': term.id, 'def': term.definition, 'children': children, 'parents': parents, 'xrefs': [xref.id for xref in term.xrefs] } def read_fasta(path, gzipped=False): f = gzip.open(path, 'rt') if gzipped else open(path, 'r') for record in SeqIO.parse(f, 'fasta'): yield record.id, str(record.seq) f.close() def wget(source, destination): cmd = ['wget', '-q', str(source), '-O', str(destination)] if not settings.CABIN_NON_INTERACTIVE: cmd = cmd + ['--show-progress'] proc = subprocess.Popen(cmd) proc.communicate() if proc.returncode == 0: logger.info('Successfully downloaded to %s' % destination) else: os.remove(destination) raise CabinError('Failed to download to %s' % destination) def ftp_modify_time(ftp_server, ftp_path): """Returns a datetime object containing the modification time of a given FTP path.""" ftp = FTP(ftp_server) ftp.login() # MDTM command gives the file modification time # https://tools.ietf.org/html/rfc3659#section-3 resp_code, timestamp = ftp.voidcmd('MDTM ' + ftp_path).split() if resp_code == '213': # success return parser.parse(timestamp) else: ftp_url = 'ftp://{s}{p}'.format(s=ftp_server, p=ftp_path) raise CabinError('Failed to get FTP file modification time for: ' + ftp_url) def cut_tsv_with_zcat(src, dst): """ creates a file with first 5 columns only, eg: partial vcf for dbSNP""" command = 'zcat {src} | cut -f 1-5,8 > {dst}'.format(src=src, dst=dst) proc = subprocess.Popen(command, shell=True) proc.communicate() if proc.returncode: raise CabinError('Failed to cut: ' + str(src)) def gunzip(src, dst): command = 'gunzip -c {src} > {dst}'.format(src=src, dst=dst) proc = subprocess.Popen(command, shell=True) proc.communicate() if proc.returncode: raise CabinError('Failed to unzip: ' + str(src)) def unzip(zipname, extract_dir=None): logger.info('Unzipping: ' + str(zipname)) zipname = Path(zipname) if extract_dir is None: assert Path(zipname).suffix == '.zip', 'expected zip file name to end with ".zip"' extract_dir = zipname.parent / zipname.stem else: extract_dir = Path(extract_dir) if extract_dir.exists(): logger.info('Extracted directory exists, skipping unzip: ' + str(extract_dir)) return extract_dir extract_dir.mkdir(parents=True, exist_ok=True) cmd = ['unzip', str(zipname), '-d', str(extract_dir)] proc = subprocess.Popen(cmd) proc.communicate() if proc.returncode: extract_dir.rmtree(extract_dir) return extract_dir
<filename>cabin/io.py import os import csv import gzip import subprocess from lxml import etree from ftplib import FTP from dateutil import parser from pathlib import Path from . import logger, settings, CabinError def read_xsv(path, delimiter='\t', columns=None, header_leading_hash=True, ignore_leading_hash=False, gzipped=False, encoding=None): """ Parses a delimiter separated text file and yields rows as dictionaries. Args: delimiter (str): column delimiter. columns (list|None): If a list is given it is assumed that all lines of the source file are content lines (yielded as rows). If None it is expected that the first line of the file (the "header line") defines the column names. header_leading_hash (bool): Whether the header line has a leading `#`; ignored if `columns` is given. ignore_leading_hash: ignores lines with leading # from file contents. gzipped (bool): Whether the given file is gzipped. """ path = str(path) f = gzip.open(path, 'rt') if gzipped else open(path, 'r', encoding=encoding) logger.info('reading records from "{p}"'.format(p=f.name)) if columns is None: header = f.readline().strip() if header_leading_hash: if header[0] != '#': raise CabinError('Expected first line to start with #') header = header[1:] columns = header.split(delimiter) for line in f: if ignore_leading_hash and line.startswith('#'): continue values = line.strip().split(delimiter) yield dict(zip(columns, values)) f.close() def read_csv(path, delimiter=',', quotechar='"', encoding=None): """ Uses csv library to parse csv in the event that simple delimiter split is not enough. For example, some cells contain a citation, with a ',' in it, that is in quotes. eg: "Flex E, et al. Somatically acquired JAK1". """ with open(path, encoding=encoding) as infile: csv_content = csv.reader(infile, delimiter=delimiter, quotechar=quotechar) header = next(csv_content) for row in csv_content: yield dict(zip(header, row)) def read_vcf(path): path = str(path) logger.info('reading VCF records from "{p}"'.format(p=path)) vf = pysam.VariantFile(path) for variant in vf.fetch(): row = { 'CHROM': variant.chrom, 'POS': variant.pos, 'ID': variant.id, 'REF': variant.ref, 'ALT': variant.alts, # tuple: ('A',) 'QUAL': variant.qual, 'FILTER': variant.filter, 'INFO': variant.info, # eg usage: info.get('CLNDISDB') } yield row # A callback to free the memory used by elements retrieved by etree.iterparse # https://stackoverflow.com/a/12161078 # https://www.ibm.com/developerworks/xml/library/x-hiperfparse/ def xml_element_clear_memory(elem): elem.clear() # Also eliminate now-empty references from the root node to elem, # otherwise these dangling elements will swallow a lot of memory! for ancestor in elem.xpath('ancestor-or-self::*'): while ancestor.getprevious() is not None: del ancestor.getparent()[0] def xml_element_to_string(elem): return etree.tostring(elem, pretty_print=True, encoding='unicode') def read_xml(source, tag): """source is either path to a plain text XML file or a file object that reads in bytes.""" # use an incremental parser instead of loading the entire DOM in memory # NOTE users must clear the memory used by subelements retrieved via xpath # or the like after use. for _, elem in etree.iterparse(source, tag=tag): yield elem def read_obo(path): """ For each term in ontology, yields the name and id of the the term, the immediate children term, and immediate parent terms. All xrefs are kept and user can parse for a subset by prefix. Source: obo format. """ from pronto import Ontology # FIXME: UnicodeWarning: unsound encoding, assuming ISO-8859-1 (73% confidence) # with hpo import, not mondo or disease ontology ontology = Ontology(str(path)) for term in ontology.terms(): children = [child.id for child in ontology[term.id].subclasses(distance=1, with_self=False)] parents = [parent.id for parent in ontology[term.id].superclasses(distance=1, with_self=False)] yield { '_term': term, 'name': term.name, 'id': term.id, 'def': term.definition, 'children': children, 'parents': parents, 'xrefs': [xref.id for xref in term.xrefs] } def read_fasta(path, gzipped=False): f = gzip.open(path, 'rt') if gzipped else open(path, 'r') for record in SeqIO.parse(f, 'fasta'): yield record.id, str(record.seq) f.close() def wget(source, destination): cmd = ['wget', '-q', str(source), '-O', str(destination)] if not settings.CABIN_NON_INTERACTIVE: cmd = cmd + ['--show-progress'] proc = subprocess.Popen(cmd) proc.communicate() if proc.returncode == 0: logger.info('Successfully downloaded to %s' % destination) else: os.remove(destination) raise CabinError('Failed to download to %s' % destination) def ftp_modify_time(ftp_server, ftp_path): """Returns a datetime object containing the modification time of a given FTP path.""" ftp = FTP(ftp_server) ftp.login() # MDTM command gives the file modification time # https://tools.ietf.org/html/rfc3659#section-3 resp_code, timestamp = ftp.voidcmd('MDTM ' + ftp_path).split() if resp_code == '213': # success return parser.parse(timestamp) else: ftp_url = 'ftp://{s}{p}'.format(s=ftp_server, p=ftp_path) raise CabinError('Failed to get FTP file modification time for: ' + ftp_url) def cut_tsv_with_zcat(src, dst): """ creates a file with first 5 columns only, eg: partial vcf for dbSNP""" command = 'zcat {src} | cut -f 1-5,8 > {dst}'.format(src=src, dst=dst) proc = subprocess.Popen(command, shell=True) proc.communicate() if proc.returncode: raise CabinError('Failed to cut: ' + str(src)) def gunzip(src, dst): command = 'gunzip -c {src} > {dst}'.format(src=src, dst=dst) proc = subprocess.Popen(command, shell=True) proc.communicate() if proc.returncode: raise CabinError('Failed to unzip: ' + str(src)) def unzip(zipname, extract_dir=None): logger.info('Unzipping: ' + str(zipname)) zipname = Path(zipname) if extract_dir is None: assert Path(zipname).suffix == '.zip', 'expected zip file name to end with ".zip"' extract_dir = zipname.parent / zipname.stem else: extract_dir = Path(extract_dir) if extract_dir.exists(): logger.info('Extracted directory exists, skipping unzip: ' + str(extract_dir)) return extract_dir extract_dir.mkdir(parents=True, exist_ok=True) cmd = ['unzip', str(zipname), '-d', str(extract_dir)] proc = subprocess.Popen(cmd) proc.communicate() if proc.returncode: extract_dir.rmtree(extract_dir) return extract_dir
en
0.822789
Parses a delimiter separated text file and yields rows as dictionaries. Args: delimiter (str): column delimiter. columns (list|None): If a list is given it is assumed that all lines of the source file are content lines (yielded as rows). If None it is expected that the first line of the file (the "header line") defines the column names. header_leading_hash (bool): Whether the header line has a leading `#`; ignored if `columns` is given. ignore_leading_hash: ignores lines with leading # from file contents. gzipped (bool): Whether the given file is gzipped. #') Uses csv library to parse csv in the event that simple delimiter split is not enough. For example, some cells contain a citation, with a ',' in it, that is in quotes. eg: "Flex E, et al. Somatically acquired JAK1". # tuple: ('A',) # eg usage: info.get('CLNDISDB') # A callback to free the memory used by elements retrieved by etree.iterparse # https://stackoverflow.com/a/12161078 # https://www.ibm.com/developerworks/xml/library/x-hiperfparse/ # Also eliminate now-empty references from the root node to elem, # otherwise these dangling elements will swallow a lot of memory! source is either path to a plain text XML file or a file object that reads in bytes. # use an incremental parser instead of loading the entire DOM in memory # NOTE users must clear the memory used by subelements retrieved via xpath # or the like after use. For each term in ontology, yields the name and id of the the term, the immediate children term, and immediate parent terms. All xrefs are kept and user can parse for a subset by prefix. Source: obo format. # FIXME: UnicodeWarning: unsound encoding, assuming ISO-8859-1 (73% confidence) # with hpo import, not mondo or disease ontology Returns a datetime object containing the modification time of a given FTP path. # MDTM command gives the file modification time # https://tools.ietf.org/html/rfc3659#section-3 # success creates a file with first 5 columns only, eg: partial vcf for dbSNP
2.918747
3
Random/napGet2/napGet/naptest/test.py
Dahjochri/Misc-Code
0
6617606
import multiprocessing, os def lol(a): print "Process:", os.getpid() print "Value:", a if __name__ == "__main__": p = multiprocessing.Pool(processes = 1) p.map_async(lol, range(200), 10).get(9999999)
import multiprocessing, os def lol(a): print "Process:", os.getpid() print "Value:", a if __name__ == "__main__": p = multiprocessing.Pool(processes = 1) p.map_async(lol, range(200), 10).get(9999999)
none
1
2.686822
3
demo/step_2.py
sitting-duck/chronology
189
6617607
from chronological import read_prompt, fetch_max_search_doc, main async def fetch_top_animal(query): # splitting on ',' -- similar to what you might do with a csv file prompt_animals = read_prompt('animals').split(',') return await fetch_max_search_doc(query, prompt_animals, engine="ada") async def fetch_three_top_animals(query, n): # splitting on ',' -- similar to what you might do with a csv file prompt_animals = read_prompt('animals').split(',') return await fetch_max_search_doc(query, prompt_animals, engine="ada", n=n) async def logic(): fetch_top_animal_res = await fetch_top_animal("monkey") fetch_top_three_animals_res = await fetch_three_top_animals("monkey", 3) print('-------------------------') print('Fetch Top Animal Response: {0}'.format(fetch_top_animal_res)) print('-------------------------') print('Fetch Top Three Animals Response: {0}'.format(fetch_top_three_animals_res)) print('-------------------------') main(logic)
from chronological import read_prompt, fetch_max_search_doc, main async def fetch_top_animal(query): # splitting on ',' -- similar to what you might do with a csv file prompt_animals = read_prompt('animals').split(',') return await fetch_max_search_doc(query, prompt_animals, engine="ada") async def fetch_three_top_animals(query, n): # splitting on ',' -- similar to what you might do with a csv file prompt_animals = read_prompt('animals').split(',') return await fetch_max_search_doc(query, prompt_animals, engine="ada", n=n) async def logic(): fetch_top_animal_res = await fetch_top_animal("monkey") fetch_top_three_animals_res = await fetch_three_top_animals("monkey", 3) print('-------------------------') print('Fetch Top Animal Response: {0}'.format(fetch_top_animal_res)) print('-------------------------') print('Fetch Top Three Animals Response: {0}'.format(fetch_top_three_animals_res)) print('-------------------------') main(logic)
en
0.933796
# splitting on ',' -- similar to what you might do with a csv file # splitting on ',' -- similar to what you might do with a csv file
3.385509
3
annotations/admin.py
DevangS/CoralNet
4
6617608
from annotations.models import Label, LabelSet, LabelGroup, Annotation from django.contrib import admin import reversion class LabelAdmin(admin.ModelAdmin): list_display = ('name', 'code', 'group', 'create_date') class LabelSetAdmin(admin.ModelAdmin): pass class LabelGroupAdmin(admin.ModelAdmin): list_display = ('name', 'code') # Inherit from reversion.VersionAdmin to enable versioning for a particular model. class AnnotationAdmin(reversion.VersionAdmin): list_display = ('source', 'image', 'point') admin.site.register(Label, LabelAdmin) admin.site.register(LabelSet, LabelSetAdmin) admin.site.register(LabelGroup, LabelGroupAdmin) admin.site.register(Annotation, AnnotationAdmin)
from annotations.models import Label, LabelSet, LabelGroup, Annotation from django.contrib import admin import reversion class LabelAdmin(admin.ModelAdmin): list_display = ('name', 'code', 'group', 'create_date') class LabelSetAdmin(admin.ModelAdmin): pass class LabelGroupAdmin(admin.ModelAdmin): list_display = ('name', 'code') # Inherit from reversion.VersionAdmin to enable versioning for a particular model. class AnnotationAdmin(reversion.VersionAdmin): list_display = ('source', 'image', 'point') admin.site.register(Label, LabelAdmin) admin.site.register(LabelSet, LabelSetAdmin) admin.site.register(LabelGroup, LabelGroupAdmin) admin.site.register(Annotation, AnnotationAdmin)
en
0.602068
# Inherit from reversion.VersionAdmin to enable versioning for a particular model.
2.023663
2
integrated.py
thautwarm/Stardust
2
6617609
import warnings from copy import deepcopy warnings.filterwarnings("ignore") class ObjectUsageError(Exception): pass class CheckConditionError(Exception): pass class UnsolvedError(Exception): pass def handle_error(parser): func = parser.match history = (0, parser.name) def _f(objs, meta=None, partial=True): if not meta: raise CheckConditionError("Meta Information not defined yet!") res = func(objs, meta=meta) if res is None: c = meta.count r = meta.rdx for ch in objs[c:]: if ch is '\n': r += 1 c += 1 continue break info = " ".join(objs[c:c + 10]) if len(objs) > c + 10: info += '...' raise SyntaxError(''' Syntax Error at row {r} Error startswith : {info} '''.format(r=r, info=info)) else: if not partial and len(objs) != meta.count: warnings.warn("Parsing unfinished.") c = meta.count r = meta.rdx for ch in objs[c:]: if ch is '\n': r += 1 c += 1 continue break info = " ".join(objs[c:c + 10]) if len(objs) > c + 10: info += '...' raise SyntaxError(''' Syntax Error at row {r} Error startswith : {info} '''.format(r=r, info=info)) return res return _f # ====== Define Generic Type Params ============= WarningInfo = """ You're trying to visit the elems that've been deprecated. If it occurred when you're using EBNFParser, report it as a BUG at `https://github.com/thautwarm/EBNFParser`. Thanks a lot! """ # ====== Undef = None class Const: def __new__(self): raise ObjectUsageError("You're trying to new an instance with a module.") UnMatched = None NameFilter = 0 RawFilter = 1 RegexFilter = 2 class RecursiveFound(Exception): def __init__(self, node): self.node = node self.possibilities = [] def add(self, possibility): self.possibilities.append(possibility) def __str__(self): s = '=====\n' s += self.node.name + '\n' s += '\n'.join(a.name + ' | ' + str([c.name for c in b]) for a, b in self.possibilities) return s class Recur: def __new__(self, name, count): return (name, count) class Trace: def __init__(self, trace=Undef, length=Undef): self.length = length if length is not Undef else \ len(trace) if trace is not Undef else \ 0 self.content = trace if trace is not Undef else \ [] self._Mem = len(self.content) def __iter__(self): yield from [elem for elem in self.content[:self.length]] def __getitem__(self, item): if isinstance(item, int): if item >= self.length: warnings.warn(WarningInfo) return self.content[item] elif isinstance(item, slice): if item.stop > self.length: warnings.warn(WarningInfo) return self.content[item] def append(self, elem): # reuse the memory cache if self.length == self._Mem: self.length += 1 self._Mem += 1 self.content.append(elem) elif self.length < self._Mem: self.content[self.length] = elem self.length += 1 def new(self, constructor): # just can be used for Type `Trace[Contrainer[T]]` # reuse the memory cache if self.length == self._Mem: self.length += 1 self._Mem += 1 self.content.append(constructor()) elif self.length < self._Mem: self.content[self.length].length = 0 self.length += 1 def pop(self): self.length -= 1 assert self.length >= 0 def where(self, obj): for idx, elem in enumerate(self.content[:self.length]): if elem is obj: return idx return Undef # ============================= # Pattern Matching # ============================= def Generate_RegexPatten_From(mode, escape=False): import re return re.compile(re.escape(mode)) if escape else re.compile(mode) # The Reason why to repeat myself at the following three functions. # `Match_Char_By` `Match_Without_Regex_By`, `Match_With_Regex_By` """ This pattern is the key to speed up the parser framework. For sure, I can abstract the way how the input string compares with the parser's memeber `mode`, and then, these codes `if value == self.mode` `self.mode.fullmatch(value)` can be unified to `if someFunc(value, mode)` or `if someFunc(value, self)` However, abstraction can have costly results. """ def Match_Char_By(self): def match(objs, meta, recur=None): try: value = objs[meta.count] except IndexError: return Const.UnMatched if value is self.mode: if value is '\n': meta.rdx += 1 meta.new() return value return Const.UnMatched return match def Match_Without_Regex_By(self): def match(objs, meta, recur=None): try: value = objs[meta.count] except IndexError: return Const.UnMatched if value == self.mode: if value is '\n': meta.rdx += 1 meta.new() return value return Const.UnMatched return match def Match_With_Regex_By(self): def match(objs, meta, recur=None): try: value = objs[meta.count] except IndexError: return Const.UnMatched if self.mode.fullmatch(value): if value is '\n': meta.rdx += 1 meta.new() return value return Const.UnMatched return match # ============================= # Parser # ============================= def analyze(ebnf): if len(ebnf) is 1 or not all(ebnf): return None groups = dict() groupOrder = [] for case in ebnf: groupId = case[0].name if groupId not in groupOrder: groups[groupId] = [case] groupOrder.append(groupId) else: groups[groupId].append(case) if len(groupOrder) is 1: return None return groups, groupOrder def grammarRemake(groups, groupOrder): return [([groups[groupId][0][0], DependentAstParser( *[case[1:] for case in groups[groupId]])] if len(groups[groupId]) > 1 else groups[groupId][0]) for groupId in groupOrder] def optimize(ebnf): analyzed = analyze(ebnf) if analyzed is None: return ebnf groups, groupOrder = analyzed return grammarRemake(groups, groupOrder) class Ignore: Value = 0 Name = 1 class BaseParser: name = Undef has_recur = Undef def match(self, objs, meta, recur=Undef): raise Exception("There is no access to an abstract method.") # incomplete class CharParser(BaseParser): def __init__(self, mode, name=Undef): length = len(mode) assert length is 1 self.mode = mode self.name = "'{MODE}'".format(MODE=mode) if name is Undef else name self.match = Match_Char_By(self) class StringParser(BaseParser): def __init__(self, mode, name=Undef, isRegex=False, ifIsRegexThenEscape=False): self.name = name if name is not Undef else "'{MODE}'".format(MODE=mode) self.isRegex = isRegex if isRegex: self.mode = Generate_RegexPatten_From(mode, escape=ifIsRegexThenEscape) self.match = Match_With_Regex_By(self) else: self.mode = mode self.match = Match_Without_Regex_By(self) class NotIn(StringParser): def __init__(self, mode, name=Undef): self.name = name if name is not Undef else "'{MODE}'".format(MODE=mode) self.mode = mode def match(objs, meta, recur=None): try: value = objs[meta.count] except IndexError: return Const.UnMatched if value not in self.mode: if value is '\n': meta.rdx += 1 meta.new() return value return Const.UnMatched self.match = match class Ref(BaseParser): def __init__(self, name): self.name = name class AstParser(BaseParser): def __init__(self, *ebnf, name=Undef, toIgnore=Undef): # each in the cache will be processed into a parser. self.cache = optimize(ebnf) # the possible output types for an series of input tokenized words. self.possibilities = [] # whether this parser will refer to itself. self.has_recur = False # the identity of a parser. self.name = name if name is not Undef else \ ' | '.join(' '.join(map(lambda parser: parser.name, ebnf_i)) for ebnf_i in ebnf) # is this parser compiled, must be False when initializing. self.compiled = False # if a parser's name is in this set, the result it output will be ignored when parsing. self.toIgnore = toIgnore def compile(self, namespace, recurSearcher): if self.name in recurSearcher: self.has_recur = True self.compiled = True else: recurSearcher.add(self.name) if self.compiled: return self for es in self.cache: self.possibilities.append([]) for e in es: if isinstance(e, StringParser) or \ isinstance(e, CharParser): self.possibilities[-1].append(e) elif isinstance(e, Ref): e = namespace[e.name] if isinstance(e, AstParser): e.compile(namespace, recurSearcher) self.possibilities[-1].append(e) if not self.has_recur and e.has_recur: self.has_recur = True elif isinstance(e, AstParser): if e.name not in namespace: namespace[e.name] = e else: e = namespace[e.name] e.compile(namespace, recurSearcher) self.possibilities[-1].append(e) if not self.has_recur and e.has_recur: self.has_recur = True else: print(e) raise UnsolvedError("Unknown Parser Type.") if hasattr(self, 'cache'): del self.cache if self.name in recurSearcher: recurSearcher.remove(self.name) if not self.compiled: self.compiled = True def match(self, objs, meta, recur=Undef): if self.has_recur and self in meta.trace[meta.count]: if isinstance(self, SeqParser) or recur is self: return Const.UnMatched raise RecursiveFound(self) meta.branch() if self.has_recur: meta.trace[meta.count].append(self) for possibility in self.possibilities: meta.branch() result = self.patternMatch(objs, meta, possibility, recur=recur) if result is Const.UnMatched: meta.rollback() continue elif isinstance(result, Ast): meta.pull() break elif isinstance(result, RecursiveFound): meta.rollback() break meta.pull() return result def patternMatch(self, objs, meta, possibility, recur=Undef): try: # Not recur result = Ast(meta.clone(), self.name) for parser in possibility: r = parser.match(objs, meta=meta, recur=recur) # if `result` is still empty, it might not allow LR now. if isinstance(r, str) or isinstance(r, Ast): resultMerge(result, r, parser, self.toIgnore) elif r is Const.UnMatched: return Const.UnMatched elif isinstance(r, RecursiveFound): raise r else: raise UnsolvedError("Unsolved return type. {}".format(r.__class__)) else: return result except RecursiveFound as RecurInfo: RecurInfo.add((self, possibility[possibility.index(parser) + 1:])) # RecurInfo has a trace of Beginning Recur Node to Next Recur Node with # specific possibility. if RecurInfo.node is not self: return RecurInfo return leftRecursion(objs, meta, possibility, RecurInfo) def resultMerge(result, r, parser, toIgnore): if isinstance(parser, SeqParser) or isinstance(parser, DependentAstParser): if toIgnore is Undef: result.extend(r) else: result.extend([item for item in r if ((item not in toIgnore[Const.RawFilter]) if isinstance(item, str) else (item.name not in toIgnore[Const.NameFilter]))]) else: if toIgnore is Undef: result.append(r) else: if isinstance(r, str): if r not in toIgnore[Const.RawFilter]: result.append(r) elif r.name not in toIgnore[Const.NameFilter]: result.append(r) def leftRecursion(objs, meta, RecurCase, RecurInfo): recur = RecurInfo.node for case in recur.possibilities: if case is RecurCase: continue meta.branch() veryFirst = recur.patternMatch(objs, meta, case, recur=recur) if isinstance(veryFirst, RecursiveFound) or veryFirst is Const.UnMatched: meta.rollback() continue else: meta.pull() first = veryFirst recurDeepCount = 0 while True: meta.branch() for parser, possibility in RecurInfo.possibilities: result = parser.patternMatch(objs, meta, possibility, recur=recur) if result is Const.UnMatched: meta.rollback() return Const.UnMatched if recurDeepCount is 0 else veryFirst elif isinstance(result, Ast): result.appendleft(first) elif isinstance(result, RecursiveFound): raise UnsolvedError("Error occurs : found a new left recursion when handling an other.") else: raise UnsolvedError("Unsolved return from method `patternMatch`.") first = result recurDeepCount += 1 meta.pull() veryFirst = first else: # Fail to match any case. return Const.UnMatched class DependentAstParser(AstParser): pass class SeqParser(AstParser): def __init__(self, *ebnf, name=Undef, atleast=0, atmost=Undef): super(SeqParser, self).__init__(*ebnf, name=name) if atmost is Undef: if atleast is 0: self.name = "({NAME})*".format(NAME=self.name) else: self.name = '({NAME}){{{AT_LEAST}}}'.format(NAME=self.name, AT_LEAST=atleast) else: self.name = "({NAME}){{{AT_LEAST},{AT_MOST}}}".format( NAME=self.name, AT_LEAST=atleast, AT_MOST=atmost) self.atleast = atleast self.atmost = atmost def match(self, objs, meta, recur=Undef): result = Ast(meta.clone(), self.name) if meta.count == len(objs): # boundary cases if self.atleast is 0: return result return Const.UnMatched meta.branch() matchedNum = 0 if self.atmost is not Undef: """ (ast){a b} """ while True: if matchedNum >= self.atmost: break try: r = super(SeqParser, self).match(objs, meta=meta, recur=recur) except IndexError: break if r is Const.UnMatched: break elif isinstance(r, RecursiveFound): raise UnsolvedError("Cannot make left recursions in SeqParser!!!") result.extend(r) matchedNum += 1 else: """ ast{a} | [ast] | ast* """ while True: try: r = super(SeqParser, self).match(objs, meta=meta, recur=recur) except IndexError: break if r is Const.UnMatched: break elif isinstance(r, RecursiveFound): raise UnsolvedError("Cannot make left recursions in SeqParser!!!") result.extend(r) matchedNum += 1 if matchedNum < self.atleast: meta.rollback() return Const.UnMatched meta.pull() return result class MetaInfo: def __init__(self, count=0, rdx=0, trace=None, file_name=None): self.count = count if trace: self.trace = trace else: self.trace = Trace() self.trace.append(Trace()) self.rdx = rdx self.history = [] self.fileName = file_name if file_name else "<input>" def new(self): self.count += 1 self.trace.new(Trace) def branch(self): """ Save a record of parsing history in order to trace back. """ self.history.append((self.count, self.rdx, self.trace[self.count].length)) def rollback(self): """ Trace back. """ try: count, rdx, length = self.history.pop() except IndexError: return None self.count = count self.rdx = rdx self.trace.length = count + 1 self.trace[count].length = length def pull(self): """ Confirm the current parsing results. Pop a record in parsing history. """ try: self.history.pop() except IndexError: raise Exception("pull no thing") def clone(self): """ Get a copy of (RowIdx, NumberOfParsedWords, FileName) from current meta information. """ return self.rdx, self.count, self.fileName def __str__(self): return """ -------------------- COUNT : {COUNT} ROW_IDX : {ROW_DIX} TRACE : {TRACE} -------------------- """.format(COUNT=self.count, ROW_DIX=self.rdx, TRACE='\n'.join( ['[' + (','.join([item.name for item in unit])) + ']' for unit in self.trace]) ) class Trace: def __init__(self, trace=None, length=None): self.length = length if length is not None else \ len(trace) if trace is not None else \ 0 self.content = trace if trace is not None else \ [] self._Mem = len(self.content) def __iter__(self): yield from [elem for elem in self.content[:self.length]] def __getitem__(self, item): if isinstance(item, int): if item >= self.length: warnings.warn("....") return self.content[item] elif isinstance(item, slice): if item.stop > self.length: warnings.warn("....") return self.content[item] def append(self, elem): if self.length == self._Mem: self.length += 1 self._Mem += 1 self.content.append(elem) elif self.length < self._Mem: self.content[self.length] = elem self.length += 1 def new(self, constructor): if self.length == self._Mem: self.length += 1 self._Mem += 1 self.content.append(constructor()) elif self.length < self._Mem: self.content[self.length].length = 0 self.length += 1 def pop(self): self.length -= 1 assert self.length >= 0 def where(self, obj): for idx, elem in enumerate(self.content[:self.length]): if elem is obj: return idx INDENT_UNIT = ' ' * 4 class Ast(list): def __init__(self, meta, name): super(Ast, self).__init__() self.name = name self.meta = meta def appendleft(self, obj): self.reverse() self.append(obj) self.reverse() def __str__(self): return self.dump() def dump(self, indent=0): next_indent = indent + 1 return """{INDENT}{NAME}[ {CONTENT} {INDENT}]""".format(INDENT=INDENT_UNIT * indent, NAME=self.name, CONTENT='\n'.join( [ "{NEXT_INDENT}\"{STR}\"".format(NEXT_INDENT=INDENT_UNIT * next_indent, STR=node) if isinstance(node, str) else \ node.dump(next_indent) for node in self ])) def dumpToJSON(self): return dict(name=self.name, value=[node if isinstance(node, str) else \ node.dumpToJSON() for node in self], meta=self.meta) class UnsolvedError(Exception): pass import re def __escape__(tk): if tk.startswith('R:'): return tk[2:] else: return re.escape(tk)
import warnings from copy import deepcopy warnings.filterwarnings("ignore") class ObjectUsageError(Exception): pass class CheckConditionError(Exception): pass class UnsolvedError(Exception): pass def handle_error(parser): func = parser.match history = (0, parser.name) def _f(objs, meta=None, partial=True): if not meta: raise CheckConditionError("Meta Information not defined yet!") res = func(objs, meta=meta) if res is None: c = meta.count r = meta.rdx for ch in objs[c:]: if ch is '\n': r += 1 c += 1 continue break info = " ".join(objs[c:c + 10]) if len(objs) > c + 10: info += '...' raise SyntaxError(''' Syntax Error at row {r} Error startswith : {info} '''.format(r=r, info=info)) else: if not partial and len(objs) != meta.count: warnings.warn("Parsing unfinished.") c = meta.count r = meta.rdx for ch in objs[c:]: if ch is '\n': r += 1 c += 1 continue break info = " ".join(objs[c:c + 10]) if len(objs) > c + 10: info += '...' raise SyntaxError(''' Syntax Error at row {r} Error startswith : {info} '''.format(r=r, info=info)) return res return _f # ====== Define Generic Type Params ============= WarningInfo = """ You're trying to visit the elems that've been deprecated. If it occurred when you're using EBNFParser, report it as a BUG at `https://github.com/thautwarm/EBNFParser`. Thanks a lot! """ # ====== Undef = None class Const: def __new__(self): raise ObjectUsageError("You're trying to new an instance with a module.") UnMatched = None NameFilter = 0 RawFilter = 1 RegexFilter = 2 class RecursiveFound(Exception): def __init__(self, node): self.node = node self.possibilities = [] def add(self, possibility): self.possibilities.append(possibility) def __str__(self): s = '=====\n' s += self.node.name + '\n' s += '\n'.join(a.name + ' | ' + str([c.name for c in b]) for a, b in self.possibilities) return s class Recur: def __new__(self, name, count): return (name, count) class Trace: def __init__(self, trace=Undef, length=Undef): self.length = length if length is not Undef else \ len(trace) if trace is not Undef else \ 0 self.content = trace if trace is not Undef else \ [] self._Mem = len(self.content) def __iter__(self): yield from [elem for elem in self.content[:self.length]] def __getitem__(self, item): if isinstance(item, int): if item >= self.length: warnings.warn(WarningInfo) return self.content[item] elif isinstance(item, slice): if item.stop > self.length: warnings.warn(WarningInfo) return self.content[item] def append(self, elem): # reuse the memory cache if self.length == self._Mem: self.length += 1 self._Mem += 1 self.content.append(elem) elif self.length < self._Mem: self.content[self.length] = elem self.length += 1 def new(self, constructor): # just can be used for Type `Trace[Contrainer[T]]` # reuse the memory cache if self.length == self._Mem: self.length += 1 self._Mem += 1 self.content.append(constructor()) elif self.length < self._Mem: self.content[self.length].length = 0 self.length += 1 def pop(self): self.length -= 1 assert self.length >= 0 def where(self, obj): for idx, elem in enumerate(self.content[:self.length]): if elem is obj: return idx return Undef # ============================= # Pattern Matching # ============================= def Generate_RegexPatten_From(mode, escape=False): import re return re.compile(re.escape(mode)) if escape else re.compile(mode) # The Reason why to repeat myself at the following three functions. # `Match_Char_By` `Match_Without_Regex_By`, `Match_With_Regex_By` """ This pattern is the key to speed up the parser framework. For sure, I can abstract the way how the input string compares with the parser's memeber `mode`, and then, these codes `if value == self.mode` `self.mode.fullmatch(value)` can be unified to `if someFunc(value, mode)` or `if someFunc(value, self)` However, abstraction can have costly results. """ def Match_Char_By(self): def match(objs, meta, recur=None): try: value = objs[meta.count] except IndexError: return Const.UnMatched if value is self.mode: if value is '\n': meta.rdx += 1 meta.new() return value return Const.UnMatched return match def Match_Without_Regex_By(self): def match(objs, meta, recur=None): try: value = objs[meta.count] except IndexError: return Const.UnMatched if value == self.mode: if value is '\n': meta.rdx += 1 meta.new() return value return Const.UnMatched return match def Match_With_Regex_By(self): def match(objs, meta, recur=None): try: value = objs[meta.count] except IndexError: return Const.UnMatched if self.mode.fullmatch(value): if value is '\n': meta.rdx += 1 meta.new() return value return Const.UnMatched return match # ============================= # Parser # ============================= def analyze(ebnf): if len(ebnf) is 1 or not all(ebnf): return None groups = dict() groupOrder = [] for case in ebnf: groupId = case[0].name if groupId not in groupOrder: groups[groupId] = [case] groupOrder.append(groupId) else: groups[groupId].append(case) if len(groupOrder) is 1: return None return groups, groupOrder def grammarRemake(groups, groupOrder): return [([groups[groupId][0][0], DependentAstParser( *[case[1:] for case in groups[groupId]])] if len(groups[groupId]) > 1 else groups[groupId][0]) for groupId in groupOrder] def optimize(ebnf): analyzed = analyze(ebnf) if analyzed is None: return ebnf groups, groupOrder = analyzed return grammarRemake(groups, groupOrder) class Ignore: Value = 0 Name = 1 class BaseParser: name = Undef has_recur = Undef def match(self, objs, meta, recur=Undef): raise Exception("There is no access to an abstract method.") # incomplete class CharParser(BaseParser): def __init__(self, mode, name=Undef): length = len(mode) assert length is 1 self.mode = mode self.name = "'{MODE}'".format(MODE=mode) if name is Undef else name self.match = Match_Char_By(self) class StringParser(BaseParser): def __init__(self, mode, name=Undef, isRegex=False, ifIsRegexThenEscape=False): self.name = name if name is not Undef else "'{MODE}'".format(MODE=mode) self.isRegex = isRegex if isRegex: self.mode = Generate_RegexPatten_From(mode, escape=ifIsRegexThenEscape) self.match = Match_With_Regex_By(self) else: self.mode = mode self.match = Match_Without_Regex_By(self) class NotIn(StringParser): def __init__(self, mode, name=Undef): self.name = name if name is not Undef else "'{MODE}'".format(MODE=mode) self.mode = mode def match(objs, meta, recur=None): try: value = objs[meta.count] except IndexError: return Const.UnMatched if value not in self.mode: if value is '\n': meta.rdx += 1 meta.new() return value return Const.UnMatched self.match = match class Ref(BaseParser): def __init__(self, name): self.name = name class AstParser(BaseParser): def __init__(self, *ebnf, name=Undef, toIgnore=Undef): # each in the cache will be processed into a parser. self.cache = optimize(ebnf) # the possible output types for an series of input tokenized words. self.possibilities = [] # whether this parser will refer to itself. self.has_recur = False # the identity of a parser. self.name = name if name is not Undef else \ ' | '.join(' '.join(map(lambda parser: parser.name, ebnf_i)) for ebnf_i in ebnf) # is this parser compiled, must be False when initializing. self.compiled = False # if a parser's name is in this set, the result it output will be ignored when parsing. self.toIgnore = toIgnore def compile(self, namespace, recurSearcher): if self.name in recurSearcher: self.has_recur = True self.compiled = True else: recurSearcher.add(self.name) if self.compiled: return self for es in self.cache: self.possibilities.append([]) for e in es: if isinstance(e, StringParser) or \ isinstance(e, CharParser): self.possibilities[-1].append(e) elif isinstance(e, Ref): e = namespace[e.name] if isinstance(e, AstParser): e.compile(namespace, recurSearcher) self.possibilities[-1].append(e) if not self.has_recur and e.has_recur: self.has_recur = True elif isinstance(e, AstParser): if e.name not in namespace: namespace[e.name] = e else: e = namespace[e.name] e.compile(namespace, recurSearcher) self.possibilities[-1].append(e) if not self.has_recur and e.has_recur: self.has_recur = True else: print(e) raise UnsolvedError("Unknown Parser Type.") if hasattr(self, 'cache'): del self.cache if self.name in recurSearcher: recurSearcher.remove(self.name) if not self.compiled: self.compiled = True def match(self, objs, meta, recur=Undef): if self.has_recur and self in meta.trace[meta.count]: if isinstance(self, SeqParser) or recur is self: return Const.UnMatched raise RecursiveFound(self) meta.branch() if self.has_recur: meta.trace[meta.count].append(self) for possibility in self.possibilities: meta.branch() result = self.patternMatch(objs, meta, possibility, recur=recur) if result is Const.UnMatched: meta.rollback() continue elif isinstance(result, Ast): meta.pull() break elif isinstance(result, RecursiveFound): meta.rollback() break meta.pull() return result def patternMatch(self, objs, meta, possibility, recur=Undef): try: # Not recur result = Ast(meta.clone(), self.name) for parser in possibility: r = parser.match(objs, meta=meta, recur=recur) # if `result` is still empty, it might not allow LR now. if isinstance(r, str) or isinstance(r, Ast): resultMerge(result, r, parser, self.toIgnore) elif r is Const.UnMatched: return Const.UnMatched elif isinstance(r, RecursiveFound): raise r else: raise UnsolvedError("Unsolved return type. {}".format(r.__class__)) else: return result except RecursiveFound as RecurInfo: RecurInfo.add((self, possibility[possibility.index(parser) + 1:])) # RecurInfo has a trace of Beginning Recur Node to Next Recur Node with # specific possibility. if RecurInfo.node is not self: return RecurInfo return leftRecursion(objs, meta, possibility, RecurInfo) def resultMerge(result, r, parser, toIgnore): if isinstance(parser, SeqParser) or isinstance(parser, DependentAstParser): if toIgnore is Undef: result.extend(r) else: result.extend([item for item in r if ((item not in toIgnore[Const.RawFilter]) if isinstance(item, str) else (item.name not in toIgnore[Const.NameFilter]))]) else: if toIgnore is Undef: result.append(r) else: if isinstance(r, str): if r not in toIgnore[Const.RawFilter]: result.append(r) elif r.name not in toIgnore[Const.NameFilter]: result.append(r) def leftRecursion(objs, meta, RecurCase, RecurInfo): recur = RecurInfo.node for case in recur.possibilities: if case is RecurCase: continue meta.branch() veryFirst = recur.patternMatch(objs, meta, case, recur=recur) if isinstance(veryFirst, RecursiveFound) or veryFirst is Const.UnMatched: meta.rollback() continue else: meta.pull() first = veryFirst recurDeepCount = 0 while True: meta.branch() for parser, possibility in RecurInfo.possibilities: result = parser.patternMatch(objs, meta, possibility, recur=recur) if result is Const.UnMatched: meta.rollback() return Const.UnMatched if recurDeepCount is 0 else veryFirst elif isinstance(result, Ast): result.appendleft(first) elif isinstance(result, RecursiveFound): raise UnsolvedError("Error occurs : found a new left recursion when handling an other.") else: raise UnsolvedError("Unsolved return from method `patternMatch`.") first = result recurDeepCount += 1 meta.pull() veryFirst = first else: # Fail to match any case. return Const.UnMatched class DependentAstParser(AstParser): pass class SeqParser(AstParser): def __init__(self, *ebnf, name=Undef, atleast=0, atmost=Undef): super(SeqParser, self).__init__(*ebnf, name=name) if atmost is Undef: if atleast is 0: self.name = "({NAME})*".format(NAME=self.name) else: self.name = '({NAME}){{{AT_LEAST}}}'.format(NAME=self.name, AT_LEAST=atleast) else: self.name = "({NAME}){{{AT_LEAST},{AT_MOST}}}".format( NAME=self.name, AT_LEAST=atleast, AT_MOST=atmost) self.atleast = atleast self.atmost = atmost def match(self, objs, meta, recur=Undef): result = Ast(meta.clone(), self.name) if meta.count == len(objs): # boundary cases if self.atleast is 0: return result return Const.UnMatched meta.branch() matchedNum = 0 if self.atmost is not Undef: """ (ast){a b} """ while True: if matchedNum >= self.atmost: break try: r = super(SeqParser, self).match(objs, meta=meta, recur=recur) except IndexError: break if r is Const.UnMatched: break elif isinstance(r, RecursiveFound): raise UnsolvedError("Cannot make left recursions in SeqParser!!!") result.extend(r) matchedNum += 1 else: """ ast{a} | [ast] | ast* """ while True: try: r = super(SeqParser, self).match(objs, meta=meta, recur=recur) except IndexError: break if r is Const.UnMatched: break elif isinstance(r, RecursiveFound): raise UnsolvedError("Cannot make left recursions in SeqParser!!!") result.extend(r) matchedNum += 1 if matchedNum < self.atleast: meta.rollback() return Const.UnMatched meta.pull() return result class MetaInfo: def __init__(self, count=0, rdx=0, trace=None, file_name=None): self.count = count if trace: self.trace = trace else: self.trace = Trace() self.trace.append(Trace()) self.rdx = rdx self.history = [] self.fileName = file_name if file_name else "<input>" def new(self): self.count += 1 self.trace.new(Trace) def branch(self): """ Save a record of parsing history in order to trace back. """ self.history.append((self.count, self.rdx, self.trace[self.count].length)) def rollback(self): """ Trace back. """ try: count, rdx, length = self.history.pop() except IndexError: return None self.count = count self.rdx = rdx self.trace.length = count + 1 self.trace[count].length = length def pull(self): """ Confirm the current parsing results. Pop a record in parsing history. """ try: self.history.pop() except IndexError: raise Exception("pull no thing") def clone(self): """ Get a copy of (RowIdx, NumberOfParsedWords, FileName) from current meta information. """ return self.rdx, self.count, self.fileName def __str__(self): return """ -------------------- COUNT : {COUNT} ROW_IDX : {ROW_DIX} TRACE : {TRACE} -------------------- """.format(COUNT=self.count, ROW_DIX=self.rdx, TRACE='\n'.join( ['[' + (','.join([item.name for item in unit])) + ']' for unit in self.trace]) ) class Trace: def __init__(self, trace=None, length=None): self.length = length if length is not None else \ len(trace) if trace is not None else \ 0 self.content = trace if trace is not None else \ [] self._Mem = len(self.content) def __iter__(self): yield from [elem for elem in self.content[:self.length]] def __getitem__(self, item): if isinstance(item, int): if item >= self.length: warnings.warn("....") return self.content[item] elif isinstance(item, slice): if item.stop > self.length: warnings.warn("....") return self.content[item] def append(self, elem): if self.length == self._Mem: self.length += 1 self._Mem += 1 self.content.append(elem) elif self.length < self._Mem: self.content[self.length] = elem self.length += 1 def new(self, constructor): if self.length == self._Mem: self.length += 1 self._Mem += 1 self.content.append(constructor()) elif self.length < self._Mem: self.content[self.length].length = 0 self.length += 1 def pop(self): self.length -= 1 assert self.length >= 0 def where(self, obj): for idx, elem in enumerate(self.content[:self.length]): if elem is obj: return idx INDENT_UNIT = ' ' * 4 class Ast(list): def __init__(self, meta, name): super(Ast, self).__init__() self.name = name self.meta = meta def appendleft(self, obj): self.reverse() self.append(obj) self.reverse() def __str__(self): return self.dump() def dump(self, indent=0): next_indent = indent + 1 return """{INDENT}{NAME}[ {CONTENT} {INDENT}]""".format(INDENT=INDENT_UNIT * indent, NAME=self.name, CONTENT='\n'.join( [ "{NEXT_INDENT}\"{STR}\"".format(NEXT_INDENT=INDENT_UNIT * next_indent, STR=node) if isinstance(node, str) else \ node.dump(next_indent) for node in self ])) def dumpToJSON(self): return dict(name=self.name, value=[node if isinstance(node, str) else \ node.dumpToJSON() for node in self], meta=self.meta) class UnsolvedError(Exception): pass import re def __escape__(tk): if tk.startswith('R:'): return tk[2:] else: return re.escape(tk)
en
0.767974
Syntax Error at row {r} Error startswith : {info} Syntax Error at row {r} Error startswith : {info} # ====== Define Generic Type Params ============= You're trying to visit the elems that've been deprecated. If it occurred when you're using EBNFParser, report it as a BUG at `https://github.com/thautwarm/EBNFParser`. Thanks a lot! # ====== # reuse the memory cache # just can be used for Type `Trace[Contrainer[T]]` # reuse the memory cache # ============================= # Pattern Matching # ============================= # The Reason why to repeat myself at the following three functions. # `Match_Char_By` `Match_Without_Regex_By`, `Match_With_Regex_By` This pattern is the key to speed up the parser framework. For sure, I can abstract the way how the input string compares with the parser's memeber `mode`, and then, these codes `if value == self.mode` `self.mode.fullmatch(value)` can be unified to `if someFunc(value, mode)` or `if someFunc(value, self)` However, abstraction can have costly results. # ============================= # Parser # ============================= # incomplete # each in the cache will be processed into a parser. # the possible output types for an series of input tokenized words. # whether this parser will refer to itself. # the identity of a parser. # is this parser compiled, must be False when initializing. # if a parser's name is in this set, the result it output will be ignored when parsing. # Not recur # if `result` is still empty, it might not allow LR now. # RecurInfo has a trace of Beginning Recur Node to Next Recur Node with # specific possibility. # Fail to match any case. # boundary cases (ast){a b} ast{a} | [ast] | ast* Save a record of parsing history in order to trace back. Trace back. Confirm the current parsing results. Pop a record in parsing history. Get a copy of (RowIdx, NumberOfParsedWords, FileName) from current meta information. -------------------- COUNT : {COUNT} ROW_IDX : {ROW_DIX} TRACE : {TRACE} -------------------- {INDENT}{NAME}[ {CONTENT} {INDENT}]
2.293912
2
services/director/tests/test_oas.py
oetiker/osparc-simcore
0
6617610
<filename>services/director/tests/test_oas.py<gh_stars>0 import yaml import pytest from openapi_spec_validator import validate_spec from openapi_spec_validator.exceptions import ( OpenAPIValidationError ) from simcore_service_director import resources API_VERSIONS = resources.listdir(resources.RESOURCE_OPENAPI_ROOT) @pytest.mark.parametrize('version', API_VERSIONS) def test_openapi_specs(version): name = "{root}/{version}/openapi.yaml".format(root=resources.RESOURCE_OPENAPI_ROOT, version=version) openapi_path = resources.get_path(name) with resources.stream(name) as fh: specs = yaml.safe_load(fh) try: validate_spec(specs, spec_url=openapi_path.as_uri()) except OpenAPIValidationError as err: pytest.fail(err.message) @pytest.mark.parametrize('version', API_VERSIONS) def test_server_specs(version): name = "{root}/{version}/openapi.yaml".format(root=resources.RESOURCE_OPENAPI_ROOT, version=version) with resources.stream(name) as fh: specs = yaml.safe_load(fh) # client-sdk current limitation # - hooks to first server listed in oas default_server = specs['servers'][0] assert default_server['url']=='http://{host}:{port}/{version}', "Invalid convention"
<filename>services/director/tests/test_oas.py<gh_stars>0 import yaml import pytest from openapi_spec_validator import validate_spec from openapi_spec_validator.exceptions import ( OpenAPIValidationError ) from simcore_service_director import resources API_VERSIONS = resources.listdir(resources.RESOURCE_OPENAPI_ROOT) @pytest.mark.parametrize('version', API_VERSIONS) def test_openapi_specs(version): name = "{root}/{version}/openapi.yaml".format(root=resources.RESOURCE_OPENAPI_ROOT, version=version) openapi_path = resources.get_path(name) with resources.stream(name) as fh: specs = yaml.safe_load(fh) try: validate_spec(specs, spec_url=openapi_path.as_uri()) except OpenAPIValidationError as err: pytest.fail(err.message) @pytest.mark.parametrize('version', API_VERSIONS) def test_server_specs(version): name = "{root}/{version}/openapi.yaml".format(root=resources.RESOURCE_OPENAPI_ROOT, version=version) with resources.stream(name) as fh: specs = yaml.safe_load(fh) # client-sdk current limitation # - hooks to first server listed in oas default_server = specs['servers'][0] assert default_server['url']=='http://{host}:{port}/{version}', "Invalid convention"
en
0.821604
# client-sdk current limitation # - hooks to first server listed in oas
2.060288
2
wavelet.py
jeffli678/D4-wavelet
0
6617611
import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Circle, Rectangle import logging import math # import pywt def read_image(img_name): img = plt.imread(img_name).astype(float) logging.debug(img.shape) return img def show_img(img, file_name = None): plt.imshow(img, cmap = 'gray') if not file_name: plt.show() else: plt.savefig('output/' + file_name) h = np.array([1 + math.sqrt(3), 3 + math.sqrt(3), 3 - math.sqrt(3), \ 1 - math.sqrt(3)]) / (4 * math.sqrt(2)) g = np.array([h[3], -h[2], h[1], -h[0]]) def D4_1D(input_list, compress = False): odd_padded = False if len(input_list) % 2 == 1: odd_padded = True # repeat the last pixel at the right boundary # note we make a copy of the input list # because we do NOT want to change the length of it # len(input_padded) == n + 4 input_padded = [input_list[1], input_list[0]] input_padded.extend(input_list) input_padded.extend([input_list[-1], input_list[-2]]) if odd_padded: input_padded.append(input_list[-3]) n = len(input_padded) half = n / 2 - 1 # print(half) high_pass = [0] * half low_pass = [0] * half for i in range(0, half): for j in range(4): high_pass[i] += h[j] * input_padded[2 * i + j] # if we are doing a compression, the low_pass will be left zeros if not compress: low_pass[i] += g[j] * input_padded[2 * i + j] return high_pass, low_pass, odd_padded def D4_1D_inv(high_pass, low_pass, odd_padded = False): h_inv = [h[2], g[2], h[0], g[0]] g_inv = [h[3], g[3], h[1], g[1]] n = len(high_pass) # len(input_padded) == 2 * n input_padded = [] for i in range(n): input_padded.append(high_pass[i]) input_padded.append(low_pass[i]) # print(input_padded) output_list = [0] * (2 * n - 2) for i in range(0, n - 1): for j in range(4): output_list[2 * i] += h_inv[j] * input_padded[2 * i + j] output_list[2 * i + 1] += g_inv[j] * input_padded[2 * i + j] if odd_padded: output_list = output_list[ : -1] return output_list def D4_2D(img, levels = 3): new_input = img.copy() results = [] for level in range(levels): res_img, row_odd_padded, col_odd_padded = D4_2D_one_level(new_input, level + 1) row, col = res_img.shape new_input = res_img[0 : row / 2, 0: col / 2] results.append([res_img, row_odd_padded, col_odd_padded]) return results def D4_2D_one_level(img, level): show_img(img, 'input-level-%d' % level) # for level in range(levels): row_wave_img = [] for row in img: high_pass, low_pass, row_odd_padded = D4_1D(row) row_wave_img.append(high_pass + low_pass) row_wave_img = np.array(row_wave_img) col_row_wave_img = [] # transpose to iterate over the columns for col in row_wave_img.transpose(): high_pass, low_pass, col_odd_padded = D4_1D(col) col_row_wave_img.append(high_pass + low_pass) # transpose back to its original arrangement col_row_wave_img = np.array(col_row_wave_img).transpose() show_img(col_row_wave_img, 'output-level-%d' % level) return col_row_wave_img, row_odd_padded, col_odd_padded def D4_2D_inv_one_level(img, row_odd_padded, col_odd_padded, \ level, decompress = False): col_inv_img = [] for col in img.transpose(): half = len(col) / 2 high_pass = col[0 : half] low_pass = col[half :] col_inv = D4_1D_inv(high_pass, low_pass, col_odd_padded) col_inv_img.append(col_inv) col_inv_img = np.array(col_inv_img).transpose() row_col_inv_img = [] for row in col_inv_img: half = len(row) / 2 high_pass = row[0 : half] low_pass = row[half :] row_inv = D4_1D_inv(high_pass, low_pass, row_odd_padded) row_col_inv_img.append(row_inv) row_col_inv_img = np.array(row_col_inv_img) if not decompress: file_name = 'invesre-level-%d' % level else: file_name = 'decompress-level-%d' % level show_img(row_col_inv_img, file_name) return row_col_inv_img def D4_2D_inv(wavelet_results, levels = 3): curr_level = levels - 1 next_image = wavelet_results[curr_level][0] row_odd_padded = wavelet_results[curr_level][1] col_odd_padded = wavelet_results[curr_level][2] while curr_level >= 0: level_inv = D4_2D_inv_one_level(next_image, row_odd_padded, \ col_odd_padded, level = curr_level + 1) if curr_level == 0: break rows, cols = level_inv.shape curr_level -= 1 next_image = wavelet_results[curr_level][0] row_odd_padded = wavelet_results[curr_level][1] col_odd_padded = wavelet_results[curr_level][2] next_image[ : rows, : cols] = level_inv def compress_image(img, levels = 2): results = D4_2D(img, levels = levels) res_img, _, _ = results[-1] rows, cols = res_img.shape img_wavelet = res_img[ : rows / 2, : cols / 2] return img_wavelet def decompress_image(img_wavelet, levels = 2): next_image = img_wavelet.copy() while levels > 0: rows, cols = next_image.shape img_wavelet_expanded = np.zeros([2 * rows, 2 * cols]) img_wavelet_expanded[: rows, : cols] = next_image next_image = D4_2D_inv_one_level(img_wavelet_expanded, \ False, False, levels, decompress = True) levels -= 1 return next_image def test_pywt(l = None, compress = False): print('\n\n') if l == None: l = [3, 7, 1, 1, -2, 5, 4, 6] print(l) cA, cD = pywt.dwt(l, 'db2') print(cA) print(cD) if compress: cD = [0] * len(cD) l_inv = pywt.idwt(cA, cD, 'db2') print(l_inv) w = pywt.Wavelet('db2') print(w.dec_hi) print(w.dec_lo) print(w.rec_hi) print(w.rec_lo) def test_1D(l = None, compress = False): if l == None: l = [3, 7, 1, 1, -2, 5, 4, 6] print(l) high_pass, low_pass, odd_padded = D4_1D(l, compress) print(high_pass) print(low_pass) l_inv = D4_1D_inv(high_pass, low_pass, odd_padded) print(l_inv) def main(): img = read_image('input.png') l = [3, 7, 1, 1, -2, 5, 4, 6] # test_1D(l) # test_pywt(l) # only handle the first channel img = img[ : , : , 0] print(img.shape) d4_3_level = D4_2D(img, levels = 3) D4_2D_inv(d4_3_level, levels = 3) img_wavelet = compress_image(img, levels = 2) # print(img_wavelet.shape) decompressed_img = decompress_image(img_wavelet, levels = 2) if __name__ == '__main__': main()
import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Circle, Rectangle import logging import math # import pywt def read_image(img_name): img = plt.imread(img_name).astype(float) logging.debug(img.shape) return img def show_img(img, file_name = None): plt.imshow(img, cmap = 'gray') if not file_name: plt.show() else: plt.savefig('output/' + file_name) h = np.array([1 + math.sqrt(3), 3 + math.sqrt(3), 3 - math.sqrt(3), \ 1 - math.sqrt(3)]) / (4 * math.sqrt(2)) g = np.array([h[3], -h[2], h[1], -h[0]]) def D4_1D(input_list, compress = False): odd_padded = False if len(input_list) % 2 == 1: odd_padded = True # repeat the last pixel at the right boundary # note we make a copy of the input list # because we do NOT want to change the length of it # len(input_padded) == n + 4 input_padded = [input_list[1], input_list[0]] input_padded.extend(input_list) input_padded.extend([input_list[-1], input_list[-2]]) if odd_padded: input_padded.append(input_list[-3]) n = len(input_padded) half = n / 2 - 1 # print(half) high_pass = [0] * half low_pass = [0] * half for i in range(0, half): for j in range(4): high_pass[i] += h[j] * input_padded[2 * i + j] # if we are doing a compression, the low_pass will be left zeros if not compress: low_pass[i] += g[j] * input_padded[2 * i + j] return high_pass, low_pass, odd_padded def D4_1D_inv(high_pass, low_pass, odd_padded = False): h_inv = [h[2], g[2], h[0], g[0]] g_inv = [h[3], g[3], h[1], g[1]] n = len(high_pass) # len(input_padded) == 2 * n input_padded = [] for i in range(n): input_padded.append(high_pass[i]) input_padded.append(low_pass[i]) # print(input_padded) output_list = [0] * (2 * n - 2) for i in range(0, n - 1): for j in range(4): output_list[2 * i] += h_inv[j] * input_padded[2 * i + j] output_list[2 * i + 1] += g_inv[j] * input_padded[2 * i + j] if odd_padded: output_list = output_list[ : -1] return output_list def D4_2D(img, levels = 3): new_input = img.copy() results = [] for level in range(levels): res_img, row_odd_padded, col_odd_padded = D4_2D_one_level(new_input, level + 1) row, col = res_img.shape new_input = res_img[0 : row / 2, 0: col / 2] results.append([res_img, row_odd_padded, col_odd_padded]) return results def D4_2D_one_level(img, level): show_img(img, 'input-level-%d' % level) # for level in range(levels): row_wave_img = [] for row in img: high_pass, low_pass, row_odd_padded = D4_1D(row) row_wave_img.append(high_pass + low_pass) row_wave_img = np.array(row_wave_img) col_row_wave_img = [] # transpose to iterate over the columns for col in row_wave_img.transpose(): high_pass, low_pass, col_odd_padded = D4_1D(col) col_row_wave_img.append(high_pass + low_pass) # transpose back to its original arrangement col_row_wave_img = np.array(col_row_wave_img).transpose() show_img(col_row_wave_img, 'output-level-%d' % level) return col_row_wave_img, row_odd_padded, col_odd_padded def D4_2D_inv_one_level(img, row_odd_padded, col_odd_padded, \ level, decompress = False): col_inv_img = [] for col in img.transpose(): half = len(col) / 2 high_pass = col[0 : half] low_pass = col[half :] col_inv = D4_1D_inv(high_pass, low_pass, col_odd_padded) col_inv_img.append(col_inv) col_inv_img = np.array(col_inv_img).transpose() row_col_inv_img = [] for row in col_inv_img: half = len(row) / 2 high_pass = row[0 : half] low_pass = row[half :] row_inv = D4_1D_inv(high_pass, low_pass, row_odd_padded) row_col_inv_img.append(row_inv) row_col_inv_img = np.array(row_col_inv_img) if not decompress: file_name = 'invesre-level-%d' % level else: file_name = 'decompress-level-%d' % level show_img(row_col_inv_img, file_name) return row_col_inv_img def D4_2D_inv(wavelet_results, levels = 3): curr_level = levels - 1 next_image = wavelet_results[curr_level][0] row_odd_padded = wavelet_results[curr_level][1] col_odd_padded = wavelet_results[curr_level][2] while curr_level >= 0: level_inv = D4_2D_inv_one_level(next_image, row_odd_padded, \ col_odd_padded, level = curr_level + 1) if curr_level == 0: break rows, cols = level_inv.shape curr_level -= 1 next_image = wavelet_results[curr_level][0] row_odd_padded = wavelet_results[curr_level][1] col_odd_padded = wavelet_results[curr_level][2] next_image[ : rows, : cols] = level_inv def compress_image(img, levels = 2): results = D4_2D(img, levels = levels) res_img, _, _ = results[-1] rows, cols = res_img.shape img_wavelet = res_img[ : rows / 2, : cols / 2] return img_wavelet def decompress_image(img_wavelet, levels = 2): next_image = img_wavelet.copy() while levels > 0: rows, cols = next_image.shape img_wavelet_expanded = np.zeros([2 * rows, 2 * cols]) img_wavelet_expanded[: rows, : cols] = next_image next_image = D4_2D_inv_one_level(img_wavelet_expanded, \ False, False, levels, decompress = True) levels -= 1 return next_image def test_pywt(l = None, compress = False): print('\n\n') if l == None: l = [3, 7, 1, 1, -2, 5, 4, 6] print(l) cA, cD = pywt.dwt(l, 'db2') print(cA) print(cD) if compress: cD = [0] * len(cD) l_inv = pywt.idwt(cA, cD, 'db2') print(l_inv) w = pywt.Wavelet('db2') print(w.dec_hi) print(w.dec_lo) print(w.rec_hi) print(w.rec_lo) def test_1D(l = None, compress = False): if l == None: l = [3, 7, 1, 1, -2, 5, 4, 6] print(l) high_pass, low_pass, odd_padded = D4_1D(l, compress) print(high_pass) print(low_pass) l_inv = D4_1D_inv(high_pass, low_pass, odd_padded) print(l_inv) def main(): img = read_image('input.png') l = [3, 7, 1, 1, -2, 5, 4, 6] # test_1D(l) # test_pywt(l) # only handle the first channel img = img[ : , : , 0] print(img.shape) d4_3_level = D4_2D(img, levels = 3) D4_2D_inv(d4_3_level, levels = 3) img_wavelet = compress_image(img, levels = 2) # print(img_wavelet.shape) decompressed_img = decompress_image(img_wavelet, levels = 2) if __name__ == '__main__': main()
en
0.689271
# import pywt # repeat the last pixel at the right boundary # note we make a copy of the input list # because we do NOT want to change the length of it # len(input_padded) == n + 4 # print(half) # if we are doing a compression, the low_pass will be left zeros # len(input_padded) == 2 * n # print(input_padded) # for level in range(levels): # transpose to iterate over the columns # transpose back to its original arrangement # test_1D(l) # test_pywt(l) # only handle the first channel # print(img_wavelet.shape)
3.013886
3
autodo/stage_two_dataset.py
MatthewScholefield/autodo
0
6617612
from collections import namedtuple from math import cos, sin, asin import cv2 import numpy as np import random from os.path import join from prettyparse import Usage from torch.utils.data import Dataset as TorchDataset from autodo.dataset import Dataset StageTwoLabel = namedtuple('StageTwoLabel', 'yaw pitch roll center_x center_y') class StageTwoDataset(TorchDataset): usage = Usage(''' :cropped_folder str Folder of cropped images :dataset_folder str Dataset folder :-nd --network-data str - Network output csv ''') @classmethod def from_args(cls, args, train=True): return cls(args.cropped_folder, args.dataset_folder, args.network_data, train) def __init__(self, cropped_folder, dataset_folder, network_data_file=None, train=True): super().__init__() self.cropped_folder = cropped_folder self.dataset = Dataset.from_folder(dataset_folder) if network_data_file: import pandas df = pandas.read_csv(network_data_file) self.network_data = {} for a, b in df.groupby('image_id'): self.network_data[a] = b else: self.network_data = None self.datas = [] for image_id, boxes in self.dataset.labels_s.items(): if self.network_data: if image_id not in self.network_data: continue data = self.network_data[image_id] else: data = None self.datas.extend([ (image_id, box_id) for box_id in range(len(boxes)) if data is None or box_id < len(data) ]) random.seed(1234) random.shuffle(self.datas) cutoff = int(len(self.datas) * 0.7) if train: self.datas = self.datas[:cutoff] else: self.datas = self.datas[cutoff:] def __len__(self): return len(self.datas) def __getitem__(self, idx): image_id, box_id = self.datas[idx] label = self.dataset.labels_s[image_id][box_id] if self.network_data: row = self.network_data[image_id].iloc[box_id] box = [row[i] for i in ['xmin', 'ymin', 'xmax', 'ymax']] else: box = self.dataset.calc_bbox(label) xmin, ymin, xmax, ymax = box pred_filename = join(self.cropped_folder, image_id + '-{:02}.jpg'.format(box_id)) verts = self.dataset.project_verts(np.array([[0, 0, 0]]), label) xcenter, ycenter = verts[0] xmiddle = (xmax + xmin) / 2 ymiddle = (ymax + ymin) / 2 xcenter = (xcenter - xmiddle) / (max(xmax - xmin, ymax - ymin) / 2) ycenter = (ycenter - ymiddle) / (max(ymax - ymin, ymax - ymin) / 2) return self.load_image(pred_filename, box), (np.array([ cos(label.yaw) > 0, sin(label.yaw) * ((cos(label.yaw) > 0) * 2 - 1), label.pitch, rotate(label.roll, np.pi), xcenter, ycenter ])).astype('float32') @staticmethod def load_image(image_file, box): import matplotlib.pylab as plt img = plt.imread(image_file).astype('float32') / 255 height, width, channels = img.shape mesh = get_numpy_mesh(height, width, box) img = np.concatenate([img, mesh], axis=2) img = pad_img(img, size=(112, 112, 5)) img = img.transpose((2, 0, 1)) return img.astype('float32') @staticmethod def decode_label(network_output): cos_dir, sin_val, pitch, roll_val, center_x, center_y = map(float, network_output) sin_val = max(-1.0, min(1.0, sin_val)) return StageTwoLabel( asin(sin_val * ((cos_dir > 0.5) * 2 - 1)), pitch, rotate(roll_val, -np.pi), center_x, center_y ) def get_numpy_mesh(shape_y, shape_x, box): mesh = np.zeros([shape_y, shape_x, 2]) xmin, ymin, xmax, ymax = box mg_y, mg_x = np.meshgrid(np.linspace(ymin, ymax, shape_y), np.linspace(xmin, xmax, shape_x), indexing='ij') mesh[:, :, 0] = mg_y mesh[:, :, 1] = mg_x return mesh.astype('float32') def pad_img(img, size=(112, 112, 3)): padded_img = np.zeros(size) pad_center_y = size[0] / 2 pad_center_x = size[1] / 2 if img.shape[0] > img.shape[1]: newwidth = int(img.shape[1] / img.shape[0] * size[0] // 2 * 2) img = cv2.resize(img, (newwidth, size[0])) padded_img[:, int(pad_center_x - newwidth // 2):int(pad_center_x + newwidth // 2)] = img else: newheight = int(img.shape[0] / img.shape[1] * size[1] // 2 * 2) img = cv2.resize(img, (size[1], newheight)) padded_img[int(pad_center_y - newheight // 2):int(pad_center_y + newheight // 2), :] = img return padded_img def rotate(x, angle): x = x + angle x = x - (x + np.pi) // (2 * np.pi) * 2 * np.pi return x
from collections import namedtuple from math import cos, sin, asin import cv2 import numpy as np import random from os.path import join from prettyparse import Usage from torch.utils.data import Dataset as TorchDataset from autodo.dataset import Dataset StageTwoLabel = namedtuple('StageTwoLabel', 'yaw pitch roll center_x center_y') class StageTwoDataset(TorchDataset): usage = Usage(''' :cropped_folder str Folder of cropped images :dataset_folder str Dataset folder :-nd --network-data str - Network output csv ''') @classmethod def from_args(cls, args, train=True): return cls(args.cropped_folder, args.dataset_folder, args.network_data, train) def __init__(self, cropped_folder, dataset_folder, network_data_file=None, train=True): super().__init__() self.cropped_folder = cropped_folder self.dataset = Dataset.from_folder(dataset_folder) if network_data_file: import pandas df = pandas.read_csv(network_data_file) self.network_data = {} for a, b in df.groupby('image_id'): self.network_data[a] = b else: self.network_data = None self.datas = [] for image_id, boxes in self.dataset.labels_s.items(): if self.network_data: if image_id not in self.network_data: continue data = self.network_data[image_id] else: data = None self.datas.extend([ (image_id, box_id) for box_id in range(len(boxes)) if data is None or box_id < len(data) ]) random.seed(1234) random.shuffle(self.datas) cutoff = int(len(self.datas) * 0.7) if train: self.datas = self.datas[:cutoff] else: self.datas = self.datas[cutoff:] def __len__(self): return len(self.datas) def __getitem__(self, idx): image_id, box_id = self.datas[idx] label = self.dataset.labels_s[image_id][box_id] if self.network_data: row = self.network_data[image_id].iloc[box_id] box = [row[i] for i in ['xmin', 'ymin', 'xmax', 'ymax']] else: box = self.dataset.calc_bbox(label) xmin, ymin, xmax, ymax = box pred_filename = join(self.cropped_folder, image_id + '-{:02}.jpg'.format(box_id)) verts = self.dataset.project_verts(np.array([[0, 0, 0]]), label) xcenter, ycenter = verts[0] xmiddle = (xmax + xmin) / 2 ymiddle = (ymax + ymin) / 2 xcenter = (xcenter - xmiddle) / (max(xmax - xmin, ymax - ymin) / 2) ycenter = (ycenter - ymiddle) / (max(ymax - ymin, ymax - ymin) / 2) return self.load_image(pred_filename, box), (np.array([ cos(label.yaw) > 0, sin(label.yaw) * ((cos(label.yaw) > 0) * 2 - 1), label.pitch, rotate(label.roll, np.pi), xcenter, ycenter ])).astype('float32') @staticmethod def load_image(image_file, box): import matplotlib.pylab as plt img = plt.imread(image_file).astype('float32') / 255 height, width, channels = img.shape mesh = get_numpy_mesh(height, width, box) img = np.concatenate([img, mesh], axis=2) img = pad_img(img, size=(112, 112, 5)) img = img.transpose((2, 0, 1)) return img.astype('float32') @staticmethod def decode_label(network_output): cos_dir, sin_val, pitch, roll_val, center_x, center_y = map(float, network_output) sin_val = max(-1.0, min(1.0, sin_val)) return StageTwoLabel( asin(sin_val * ((cos_dir > 0.5) * 2 - 1)), pitch, rotate(roll_val, -np.pi), center_x, center_y ) def get_numpy_mesh(shape_y, shape_x, box): mesh = np.zeros([shape_y, shape_x, 2]) xmin, ymin, xmax, ymax = box mg_y, mg_x = np.meshgrid(np.linspace(ymin, ymax, shape_y), np.linspace(xmin, xmax, shape_x), indexing='ij') mesh[:, :, 0] = mg_y mesh[:, :, 1] = mg_x return mesh.astype('float32') def pad_img(img, size=(112, 112, 3)): padded_img = np.zeros(size) pad_center_y = size[0] / 2 pad_center_x = size[1] / 2 if img.shape[0] > img.shape[1]: newwidth = int(img.shape[1] / img.shape[0] * size[0] // 2 * 2) img = cv2.resize(img, (newwidth, size[0])) padded_img[:, int(pad_center_x - newwidth // 2):int(pad_center_x + newwidth // 2)] = img else: newheight = int(img.shape[0] / img.shape[1] * size[1] // 2 * 2) img = cv2.resize(img, (size[1], newheight)) padded_img[int(pad_center_y - newheight // 2):int(pad_center_y + newheight // 2), :] = img return padded_img def rotate(x, angle): x = x + angle x = x - (x + np.pi) // (2 * np.pi) * 2 * np.pi return x
en
0.37239
:cropped_folder str Folder of cropped images :dataset_folder str Dataset folder :-nd --network-data str - Network output csv
2.364586
2
Exercicios Loop/exercicio 14 - secao 06.py
cristinamais/exercicios_python
0
6617613
""" 14 - Faça um programa que leia um número inteiro positivo par N e imprima todos os números pares de 0 até N em ordem decrescente. """ n = int(input('Digite um número: ')) for par in range(n, 0, -1): print(2 * par)
""" 14 - Faça um programa que leia um número inteiro positivo par N e imprima todos os números pares de 0 até N em ordem decrescente. """ n = int(input('Digite um número: ')) for par in range(n, 0, -1): print(2 * par)
pt
0.913851
14 - Faça um programa que leia um número inteiro positivo par N e imprima todos os números pares de 0 até N em ordem decrescente.
3.785345
4
XGBoost_201805/XGBoost_one/learning_curve.py
yishantao/DailyPractice
0
6617614
# -*- coding:utf-8 -*- """This module is used to draw the learning curve""" import xgboost as xgb from xgboost import XGBClassifier # 加载LibSVM格式数据模块 from sklearn.datasets import load_svmlight_file from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from matplotlib import pyplot # read in data my_workpath = './data/' x_train, y_train = load_svmlight_file(my_workpath + 'agaricus.txt.train') x_test, y_test = load_svmlight_file(my_workpath + 'agaricus.txt.test') # print(x_train.shape) # split data into train and test sets,1/3的训练数据作为校验数据 seed = 7 test_size = 0.33 x_train_part, x_validate, y_train_part, y_validate = train_test_split(x_train, y_train, test_size=test_size, random_state=seed) # print(x_train_part.shape) # 设置boosting迭代计算次数 num_round = 100 bst = XGBClassifier(max_depth=2, learning_rate=0.1, n_estimators=num_round, silent=True, objective='binary:logistic') eval_set = [(x_train_part, y_train_part), (x_validate, y_validate)] bst.fit(x_train_part, y_train_part, eval_metric=['error', 'logloss'], eval_set=eval_set, verbose=True) # 显示学习曲线,retrieve performance metrics results = bst.evals_result() # print(results) epochs = len(results['validation_0']['error']) x_axis = range(0,epochs) # plot log loss fig, ax = pyplot.subplots() ax.plot(x_axis,results['validation_0']['logloss'],label='Train') ax.plot(x_axis,results['validation_1']['logloss'],label='Test') ax.legend() pyplot.ylabel('Log Loss') pyplot.title('XGBoost Log Loss') pyplot.show() # plot classification error fig, ax = pyplot.subplots() ax.plot(x_axis,results['validation_0']['error'],label='Train') ax.plot(x_axis,results['validation_1']['error'],label='Test') ax.legend() pyplot.ylabel('Classification Error') pyplot.title('XGBoost Classification Error') pyplot.show() # 模型训练好后,可以用训练好的模型对测试数据进行预测 # make prediction preds = bst.predict(x_test) predictions = [round(value) for value in preds] test_accuracy = accuracy_score(y_test, predictions) print('Test Accuracy:%.2f%%' % (test_accuracy * 100.0))
# -*- coding:utf-8 -*- """This module is used to draw the learning curve""" import xgboost as xgb from xgboost import XGBClassifier # 加载LibSVM格式数据模块 from sklearn.datasets import load_svmlight_file from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from matplotlib import pyplot # read in data my_workpath = './data/' x_train, y_train = load_svmlight_file(my_workpath + 'agaricus.txt.train') x_test, y_test = load_svmlight_file(my_workpath + 'agaricus.txt.test') # print(x_train.shape) # split data into train and test sets,1/3的训练数据作为校验数据 seed = 7 test_size = 0.33 x_train_part, x_validate, y_train_part, y_validate = train_test_split(x_train, y_train, test_size=test_size, random_state=seed) # print(x_train_part.shape) # 设置boosting迭代计算次数 num_round = 100 bst = XGBClassifier(max_depth=2, learning_rate=0.1, n_estimators=num_round, silent=True, objective='binary:logistic') eval_set = [(x_train_part, y_train_part), (x_validate, y_validate)] bst.fit(x_train_part, y_train_part, eval_metric=['error', 'logloss'], eval_set=eval_set, verbose=True) # 显示学习曲线,retrieve performance metrics results = bst.evals_result() # print(results) epochs = len(results['validation_0']['error']) x_axis = range(0,epochs) # plot log loss fig, ax = pyplot.subplots() ax.plot(x_axis,results['validation_0']['logloss'],label='Train') ax.plot(x_axis,results['validation_1']['logloss'],label='Test') ax.legend() pyplot.ylabel('Log Loss') pyplot.title('XGBoost Log Loss') pyplot.show() # plot classification error fig, ax = pyplot.subplots() ax.plot(x_axis,results['validation_0']['error'],label='Train') ax.plot(x_axis,results['validation_1']['error'],label='Test') ax.legend() pyplot.ylabel('Classification Error') pyplot.title('XGBoost Classification Error') pyplot.show() # 模型训练好后,可以用训练好的模型对测试数据进行预测 # make prediction preds = bst.predict(x_test) predictions = [round(value) for value in preds] test_accuracy = accuracy_score(y_test, predictions) print('Test Accuracy:%.2f%%' % (test_accuracy * 100.0))
en
0.449531
# -*- coding:utf-8 -*- This module is used to draw the learning curve # 加载LibSVM格式数据模块 # read in data # print(x_train.shape) # split data into train and test sets,1/3的训练数据作为校验数据 # print(x_train_part.shape) # 设置boosting迭代计算次数 # 显示学习曲线,retrieve performance metrics # print(results) # plot log loss # plot classification error # 模型训练好后,可以用训练好的模型对测试数据进行预测 # make prediction
3.056597
3
testes.py
viniciusfm1/bitcointrade-api-client
3
6617615
from bitcointrade import Bitcointrade #careful, don't expose your api key exchange = Bitcointrade('BRLLTC','api_token') ticker = exchange.ticker() orders = exchange.orders() trades = exchange.trades('2019-01-01T00:00:00-03:00','2019-01-02T23:59:59-03:00',1) balance = exchange.balance() print(exchange.estimated_price(2,'buy'))
from bitcointrade import Bitcointrade #careful, don't expose your api key exchange = Bitcointrade('BRLLTC','api_token') ticker = exchange.ticker() orders = exchange.orders() trades = exchange.trades('2019-01-01T00:00:00-03:00','2019-01-02T23:59:59-03:00',1) balance = exchange.balance() print(exchange.estimated_price(2,'buy'))
en
0.95336
#careful, don't expose your api key
2.384679
2
hello.py
nikramakrishnan/cv-workshop
0
6617616
print("Hello world!") print("Line 2") print("This is my line 1") print("I'm the other person") def add(num1, num2): total = num1+num2 return total num1 = 200 num2 = 10 total = add(num1, num2) print("Sum of", num1, "and", num2, "is", total)
print("Hello world!") print("Line 2") print("This is my line 1") print("I'm the other person") def add(num1, num2): total = num1+num2 return total num1 = 200 num2 = 10 total = add(num1, num2) print("Sum of", num1, "and", num2, "is", total)
none
1
3.901671
4
app/controllers/v1/base.py
meysam81/sheypoor
0
6617617
import abc class BaseController(metaclass=abc.ABCMeta): pass
import abc class BaseController(metaclass=abc.ABCMeta): pass
none
1
1.632255
2
chrome/test/functional/media/pyauto_media.py
nagineni/chromium-crosswalk
231
6617618
<filename>chrome/test/functional/media/pyauto_media.py<gh_stars>100-1000 # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """PyAuto media test base. Handles PyAuto initialization and path setup. Required to ensure each media test can load the appropriate libraries. Each test must include this snippet: # This should be at the top import pyauto_media <test code> # This should be at the bottom. if __name__ == '__main__': pyauto_media.Main() """ import os import sys def _SetupPaths(): """Add paths required for loading PyAuto and other utilities to sys.path.""" media_dir = os.path.abspath(os.path.dirname(__file__)) sys.path.append(media_dir) sys.path.append(os.path.normpath(os.path.join(media_dir, os.pardir))) # Add psutil library path. # TODO(dalecurtis): This should only be added for tests which use psutil. sys.path.append(os.path.normpath(os.path.join( media_dir, os.pardir, os.pardir, os.pardir, os.pardir, 'third_party', 'psutil'))) _SetupPaths() import pyauto_functional Main = pyauto_functional.Main
<filename>chrome/test/functional/media/pyauto_media.py<gh_stars>100-1000 # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """PyAuto media test base. Handles PyAuto initialization and path setup. Required to ensure each media test can load the appropriate libraries. Each test must include this snippet: # This should be at the top import pyauto_media <test code> # This should be at the bottom. if __name__ == '__main__': pyauto_media.Main() """ import os import sys def _SetupPaths(): """Add paths required for loading PyAuto and other utilities to sys.path.""" media_dir = os.path.abspath(os.path.dirname(__file__)) sys.path.append(media_dir) sys.path.append(os.path.normpath(os.path.join(media_dir, os.pardir))) # Add psutil library path. # TODO(dalecurtis): This should only be added for tests which use psutil. sys.path.append(os.path.normpath(os.path.join( media_dir, os.pardir, os.pardir, os.pardir, os.pardir, 'third_party', 'psutil'))) _SetupPaths() import pyauto_functional Main = pyauto_functional.Main
en
0.809626
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. PyAuto media test base. Handles PyAuto initialization and path setup. Required to ensure each media test can load the appropriate libraries. Each test must include this snippet: # This should be at the top import pyauto_media <test code> # This should be at the bottom. if __name__ == '__main__': pyauto_media.Main() Add paths required for loading PyAuto and other utilities to sys.path. # Add psutil library path. # TODO(dalecurtis): This should only be added for tests which use psutil.
2.232418
2
SeleniumTest/Main/Pages/__init__.py
NayakwadiS/Selenium_Python_UnitTest_HTML
2
6617619
from Main.Pages.pgLogin import * from Main.Pages.pgHome import * from Main.Pages.pgCasualDress import * from Main.Pages.pgPrintedDress import * from Main.Pages.pgOrder import * from Main.Pages.pgTShirt import * from Main.Pages.pgFadedTShirts import * from Main.Pages.pgMyAccount import *
from Main.Pages.pgLogin import * from Main.Pages.pgHome import * from Main.Pages.pgCasualDress import * from Main.Pages.pgPrintedDress import * from Main.Pages.pgOrder import * from Main.Pages.pgTShirt import * from Main.Pages.pgFadedTShirts import * from Main.Pages.pgMyAccount import *
none
1
1.17661
1
tests/test_get_data.py
bw4sz/NeonTreeEvaluation_python
1
6617620
import pytest import os from src import get_data import numpy as np def test_find_path(monkeypatch): monkeypatch.setenv("NEONTREEEVALUATION_DIR", "{}/data/".format(os.path.dirname(__file__))) path = get_data.find_path(plot_name = "SJER_052", data_type ="rgb") assert os.path.exists(path) path = get_data.find_path(plot_name = "SJER_052", data_type ="lidar") assert os.path.exists(path) path = get_data.find_path(plot_name = "SJER_052", data_type ="annotations") assert os.path.exists(path) path = get_data.find_path(plot_name = "SJER_052", data_type ="chm") assert os.path.exists(path) path = get_data.find_path(plot_name = "SJER_052", data_type ="hyperspectral") assert os.path.exists(path) def test_xml_parse(monkeypatch): monkeypatch.setenv("NEONTREEEVALUATION_DIR", "{}/data/".format(os.path.dirname(__file__))) path = get_data.find_path(plot_name = "SJER_052", data_type ="annotations") df = get_data.xml_parse(path) assert np.array_equal(df.columns.values,np.array(["xmin","xmax","ymin","ymax","plot_name"]))
import pytest import os from src import get_data import numpy as np def test_find_path(monkeypatch): monkeypatch.setenv("NEONTREEEVALUATION_DIR", "{}/data/".format(os.path.dirname(__file__))) path = get_data.find_path(plot_name = "SJER_052", data_type ="rgb") assert os.path.exists(path) path = get_data.find_path(plot_name = "SJER_052", data_type ="lidar") assert os.path.exists(path) path = get_data.find_path(plot_name = "SJER_052", data_type ="annotations") assert os.path.exists(path) path = get_data.find_path(plot_name = "SJER_052", data_type ="chm") assert os.path.exists(path) path = get_data.find_path(plot_name = "SJER_052", data_type ="hyperspectral") assert os.path.exists(path) def test_xml_parse(monkeypatch): monkeypatch.setenv("NEONTREEEVALUATION_DIR", "{}/data/".format(os.path.dirname(__file__))) path = get_data.find_path(plot_name = "SJER_052", data_type ="annotations") df = get_data.xml_parse(path) assert np.array_equal(df.columns.values,np.array(["xmin","xmax","ymin","ymax","plot_name"]))
none
1
2.323048
2
src/model.py
Callet91/sw
0
6617621
import os import datetime import tensorflow as tf from tensorflow.keras import layers, models, optimizers, losses class ExampleModel: """Example model using Keras sequential modeling""" def __init__(self, config): self.MODEL = models.Sequential() self.CONFIG = config # Change the config file your model self.LOSS_FN = losses.SparseCategoricalCrossentropy(from_logits=True) def generate_model(self): """Generate example model original model: https://www.tensorflow.org/tutorials/quickstart/beginner """ # Here you add layers to your model self.MODEL.add(layers.Flatten(input_shape=(28, 28))) self.MODEL.add(layers.Dense(128, activation=self.CONFIG['activation'])) self.MODEL.add(layers.Dropout(0.2)) self.MODEL.add(layers.Dense(10)) def compile(self): """Compile model""" self.MODEL.compile( optimizer = self.CONFIG['optimizer'], loss = self.LOSS_FN, metrics = [self.CONFIG['metrics']] ) def train_model(self, data_train_x, data_train_y, data_val_x, data_val_y): """Train model""" history = self.MODEL.fit( x = data_train_x, y = data_train_y, epochs = self.CONFIG["epochs"], batch_size = self.CONFIG["batch"], validation_data = (data_val_x, data_val_y), shuffle = True ) return history def evaluate_model(self, data_test_x, data_test_y): """Evaluate model""" self.MODEL.evaluate( data_test_x, data_test_y, batch_size=32)
import os import datetime import tensorflow as tf from tensorflow.keras import layers, models, optimizers, losses class ExampleModel: """Example model using Keras sequential modeling""" def __init__(self, config): self.MODEL = models.Sequential() self.CONFIG = config # Change the config file your model self.LOSS_FN = losses.SparseCategoricalCrossentropy(from_logits=True) def generate_model(self): """Generate example model original model: https://www.tensorflow.org/tutorials/quickstart/beginner """ # Here you add layers to your model self.MODEL.add(layers.Flatten(input_shape=(28, 28))) self.MODEL.add(layers.Dense(128, activation=self.CONFIG['activation'])) self.MODEL.add(layers.Dropout(0.2)) self.MODEL.add(layers.Dense(10)) def compile(self): """Compile model""" self.MODEL.compile( optimizer = self.CONFIG['optimizer'], loss = self.LOSS_FN, metrics = [self.CONFIG['metrics']] ) def train_model(self, data_train_x, data_train_y, data_val_x, data_val_y): """Train model""" history = self.MODEL.fit( x = data_train_x, y = data_train_y, epochs = self.CONFIG["epochs"], batch_size = self.CONFIG["batch"], validation_data = (data_val_x, data_val_y), shuffle = True ) return history def evaluate_model(self, data_test_x, data_test_y): """Evaluate model""" self.MODEL.evaluate( data_test_x, data_test_y, batch_size=32)
en
0.622069
Example model using Keras sequential modeling # Change the config file your model Generate example model original model: https://www.tensorflow.org/tutorials/quickstart/beginner # Here you add layers to your model Compile model Train model Evaluate model
3.105973
3
app/terminal.py
JeanExtreme002/Keylogger
4
6617622
from terminal import Terminal import json import os import sys if __name__ == "__main__": if len(sys.argv) > 1: host = sys.argv[1] else: with open(os.path.join("config","config.json")) as file: data = json.loads(file.read()) host = data.get("host", "localhost") terminal = Terminal(host = host) terminal.run()
from terminal import Terminal import json import os import sys if __name__ == "__main__": if len(sys.argv) > 1: host = sys.argv[1] else: with open(os.path.join("config","config.json")) as file: data = json.loads(file.read()) host = data.get("host", "localhost") terminal = Terminal(host = host) terminal.run()
none
1
2.645211
3
Dataset/Leetcode/train/4/1023.py
kkcookies99/UAST
0
6617623
class Solution: def XXX(self, nums1: List[int], nums2: List[int]) -> float: c=nums1+nums2 c.sort() if len(c)%2==1: a=int(len(c)/2) return c[a] else: a=len(c)/2 b=a-1 return (c[int(a)]+c[int(b)])/2 undefined for (i = 0; i < document.getElementsByTagName("code").length; i++) { console.log(document.getElementsByTagName("code")[i].innerText); }
class Solution: def XXX(self, nums1: List[int], nums2: List[int]) -> float: c=nums1+nums2 c.sort() if len(c)%2==1: a=int(len(c)/2) return c[a] else: a=len(c)/2 b=a-1 return (c[int(a)]+c[int(b)])/2 undefined for (i = 0; i < document.getElementsByTagName("code").length; i++) { console.log(document.getElementsByTagName("code")[i].innerText); }
none
1
3.207151
3
src/pre_process.py
PyeongKim/pytorch-blood-cell-detection
0
6617624
import os import torch from scipy.ndimage.interpolation import map_coordinates from scipy.ndimage.filters import gaussian_filter import cv2 import xml.etree.ElementTree as ETree import torchvision.transforms.functional as TF cell_subtypes = ("RBC", "WBC", "Platelets") subtypes_map = {key: i+1 for i, key in enumerate(cell_subtypes)} distinct_colors = ['#3cb44b', '#ffe119', '#0082c8'] subtypes_color_map = {key: distinct_colors[i] for i, key in enumerate(subtypes_map)} def parse_annotation(xml_path): """ Args: xml_path (str): path to xml file Return: obejct_ (list): location of the ground truth bounding boxes and corresponding labels """ tree = ETree.parse(xml_path) root = tree.getroot() object_ = list() for cell in root.iter("object"): # get the tree with the heading "name" subtype = cell.find("name").text.strip() assert subtype in subtypes_map, "undefined label detected" box = cell.find("bndbox") xmin = int(box.find("xmin").text) xmax = int(box.find("xmax").text) ymin = int(box.find("ymin").text) ymax = int(box.find("ymax").text) label = subtypes_map[subtype] object_.append([xmin, ymin, xmax, ymax, label]) return object_ def normalization(image, mean, std): """ Args: image (numpy) : input image (H,W,C) mean (list) : mean of each channel of the image (R,G,B) std (list) : std of each channel of the image (R,G,B) Return: normalized_image (numpy) : normalized image (H,W,C) """ normalized_image = np.zeros(image.shape) for c in range(image.shape[-1]): normalized_image[:,:,c] = (image[:,:,c] - mean[c])/std[c] return normalized_image def resize_image(image, object_, target_size=448): """ Args: image (numpy array) : input image (H, W, C) object_ (list) : location of the ground truth bounding boxes and corresponding labels target_size (int) : target size to resize, we use 448x448 as default Return: resized_image (numpy array) : resized image (H, W, C) object_ (list) : location of the ground truth bounding boxes that are rescaled """ height, width = image.shape[:2] resized_image = cv2.resize(image,(target_size, target_size)) width_ratio, height_ratio = width/target_size, height/target_size for i in range(len(object_)): object_[i][0] = round(object_[i][0]*width_ratio) # xmin object_[i][2] = round(object_[i][2]*width_ratio) # xmax object_[i][1] = round(object_[i][1]*height_ratio) # ymin object_[i][3] = round(object_[i][3]*height_ratio) # ymax return resized_image, object_ def define_crop_range(image, crop_ratio): """ Args: image (numpy array): original image (H,W,C) crop_ratio (float): range from 0.9 to 1 Return: crop_location: x and y """ height, width = image.shape[:2] crop_height, crop_width = round(height*crop_ratio), round(width*crop_ratio) return (width - crop_width, height - crop_height), (crop_width, crop_height) def crop_image(image, object_, crop_size=(40, 80), crop_loc=(0, 0)): """ Crop the image: Args: image (numpy array) : numpy array (H,W,C) object (list): location of the bounding box and label (xmin, ymin, xmax, ymax, label) crop_size (tuple or list) : the desired crop size (W, H) crop_loc (tuple or list) : the desired location to crop (x, y) return: image_cropped (numpy array) : cropped image of specific demension (H,W,C) object_ (list) : location of the ground truth bounding boxes cropped """ # crop the image xmin, xmax = crop_loc[0], crop_loc[0]+crop_size[0] ymin, ymax = crop_loc[1], crop_loc[1]+crop_size[1] image_cropped = image[ymin:ymax, xmin:xmax] for i in range(len(object_)): # crop the bounding box object_[i][0] = max(object_[i][0], xmin) object_[i][1] = max(object_[i][1], ymin) object_[i][2] = min(object_[i][2], xmax) object_[i][3] = min(object_[i][3], ymax) return image_cropped, object_ def flip(image, option_value): """ Args: image : numpy array of image option_value = random integer between 0 to 3 Return : image : numpy array of flipped image """ if option_value == 0: # vertical image = np.flip(image, option_value) elif option_value == 1: # horizontal image = np.flip(image, option_value) elif option_value == 2: # horizontally and vertically flip image = np.flip(image, 0) image = np.flip(image, 1) else: image = image # no effect return image def add_elastic_transform(image, alpha, sigma): """ Args: image : numpy array of image alpha : α is a scaling factor sigma : σ is an elasticity coefficient random_state = random integer Return : : elastically transformed numpy array of image """ assert image.shape[-1] == 3, "the elastic transform does not support gray scale image" random_state = np.random.RandomState(None) pad_size = 20 image = cv2.copyMakeBorder(image, pad_size, pad_size, pad_size, pad_size, cv2.BORDER_REFLECT_101) shape = image.shape[:2] dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0])) indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)) transformed_img = np.empty_like(image) for i in range(image.shape[-1]): transformed_img[:, :, i] = map_coordinates(image[:, :, i], indices).reshape(shape) return transformed_img[pad_size:-pad_size, pad_size:-pad_size,:] def add_gaussian_noise(image, mean=0, std=1): """ Args: image : numpy array of image mean : pixel mean of image standard deviation : pixel standard deviation of image Return : image : numpy array of image with gaussian noise added """ gaus_noise = np.random.normal(mean, std, image.shape) image = image.astype("int16") noise_img = image + gaus_noise image = ceil_floor_image(image) return noise_img def add_uniform_noise(image, low=-10, high=10): """ Args: image : numpy array of image low : lower boundary of output interval high : upper boundary of output interval Return : image : numpy array of image with uniform noise added """ uni_noise = np.random.uniform(low, high, image.shape) image = image.astype("int16") noise_img = image + uni_noise image = ceil_floor_image(image) return noise_img def change_brightness(image, value): """ Args: image : numpy array of image value : brightness Return : image : numpy array of image with brightness added """ image = image.astype("int16") image = image + value image = ceil_floor_image(image) return image def ceil_floor_image(image): """ Args: image : numpy array of image in datatype int16 Return : image : numpy array of image in datatype uint8 with ceilling(maximum 255) and flooring(minimum 0) """ image[image > 255] = 255 image[image < 0] = 0 image = image.astype("uint8") return image
import os import torch from scipy.ndimage.interpolation import map_coordinates from scipy.ndimage.filters import gaussian_filter import cv2 import xml.etree.ElementTree as ETree import torchvision.transforms.functional as TF cell_subtypes = ("RBC", "WBC", "Platelets") subtypes_map = {key: i+1 for i, key in enumerate(cell_subtypes)} distinct_colors = ['#3cb44b', '#ffe119', '#0082c8'] subtypes_color_map = {key: distinct_colors[i] for i, key in enumerate(subtypes_map)} def parse_annotation(xml_path): """ Args: xml_path (str): path to xml file Return: obejct_ (list): location of the ground truth bounding boxes and corresponding labels """ tree = ETree.parse(xml_path) root = tree.getroot() object_ = list() for cell in root.iter("object"): # get the tree with the heading "name" subtype = cell.find("name").text.strip() assert subtype in subtypes_map, "undefined label detected" box = cell.find("bndbox") xmin = int(box.find("xmin").text) xmax = int(box.find("xmax").text) ymin = int(box.find("ymin").text) ymax = int(box.find("ymax").text) label = subtypes_map[subtype] object_.append([xmin, ymin, xmax, ymax, label]) return object_ def normalization(image, mean, std): """ Args: image (numpy) : input image (H,W,C) mean (list) : mean of each channel of the image (R,G,B) std (list) : std of each channel of the image (R,G,B) Return: normalized_image (numpy) : normalized image (H,W,C) """ normalized_image = np.zeros(image.shape) for c in range(image.shape[-1]): normalized_image[:,:,c] = (image[:,:,c] - mean[c])/std[c] return normalized_image def resize_image(image, object_, target_size=448): """ Args: image (numpy array) : input image (H, W, C) object_ (list) : location of the ground truth bounding boxes and corresponding labels target_size (int) : target size to resize, we use 448x448 as default Return: resized_image (numpy array) : resized image (H, W, C) object_ (list) : location of the ground truth bounding boxes that are rescaled """ height, width = image.shape[:2] resized_image = cv2.resize(image,(target_size, target_size)) width_ratio, height_ratio = width/target_size, height/target_size for i in range(len(object_)): object_[i][0] = round(object_[i][0]*width_ratio) # xmin object_[i][2] = round(object_[i][2]*width_ratio) # xmax object_[i][1] = round(object_[i][1]*height_ratio) # ymin object_[i][3] = round(object_[i][3]*height_ratio) # ymax return resized_image, object_ def define_crop_range(image, crop_ratio): """ Args: image (numpy array): original image (H,W,C) crop_ratio (float): range from 0.9 to 1 Return: crop_location: x and y """ height, width = image.shape[:2] crop_height, crop_width = round(height*crop_ratio), round(width*crop_ratio) return (width - crop_width, height - crop_height), (crop_width, crop_height) def crop_image(image, object_, crop_size=(40, 80), crop_loc=(0, 0)): """ Crop the image: Args: image (numpy array) : numpy array (H,W,C) object (list): location of the bounding box and label (xmin, ymin, xmax, ymax, label) crop_size (tuple or list) : the desired crop size (W, H) crop_loc (tuple or list) : the desired location to crop (x, y) return: image_cropped (numpy array) : cropped image of specific demension (H,W,C) object_ (list) : location of the ground truth bounding boxes cropped """ # crop the image xmin, xmax = crop_loc[0], crop_loc[0]+crop_size[0] ymin, ymax = crop_loc[1], crop_loc[1]+crop_size[1] image_cropped = image[ymin:ymax, xmin:xmax] for i in range(len(object_)): # crop the bounding box object_[i][0] = max(object_[i][0], xmin) object_[i][1] = max(object_[i][1], ymin) object_[i][2] = min(object_[i][2], xmax) object_[i][3] = min(object_[i][3], ymax) return image_cropped, object_ def flip(image, option_value): """ Args: image : numpy array of image option_value = random integer between 0 to 3 Return : image : numpy array of flipped image """ if option_value == 0: # vertical image = np.flip(image, option_value) elif option_value == 1: # horizontal image = np.flip(image, option_value) elif option_value == 2: # horizontally and vertically flip image = np.flip(image, 0) image = np.flip(image, 1) else: image = image # no effect return image def add_elastic_transform(image, alpha, sigma): """ Args: image : numpy array of image alpha : α is a scaling factor sigma : σ is an elasticity coefficient random_state = random integer Return : : elastically transformed numpy array of image """ assert image.shape[-1] == 3, "the elastic transform does not support gray scale image" random_state = np.random.RandomState(None) pad_size = 20 image = cv2.copyMakeBorder(image, pad_size, pad_size, pad_size, pad_size, cv2.BORDER_REFLECT_101) shape = image.shape[:2] dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0])) indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)) transformed_img = np.empty_like(image) for i in range(image.shape[-1]): transformed_img[:, :, i] = map_coordinates(image[:, :, i], indices).reshape(shape) return transformed_img[pad_size:-pad_size, pad_size:-pad_size,:] def add_gaussian_noise(image, mean=0, std=1): """ Args: image : numpy array of image mean : pixel mean of image standard deviation : pixel standard deviation of image Return : image : numpy array of image with gaussian noise added """ gaus_noise = np.random.normal(mean, std, image.shape) image = image.astype("int16") noise_img = image + gaus_noise image = ceil_floor_image(image) return noise_img def add_uniform_noise(image, low=-10, high=10): """ Args: image : numpy array of image low : lower boundary of output interval high : upper boundary of output interval Return : image : numpy array of image with uniform noise added """ uni_noise = np.random.uniform(low, high, image.shape) image = image.astype("int16") noise_img = image + uni_noise image = ceil_floor_image(image) return noise_img def change_brightness(image, value): """ Args: image : numpy array of image value : brightness Return : image : numpy array of image with brightness added """ image = image.astype("int16") image = image + value image = ceil_floor_image(image) return image def ceil_floor_image(image): """ Args: image : numpy array of image in datatype int16 Return : image : numpy array of image in datatype uint8 with ceilling(maximum 255) and flooring(minimum 0) """ image[image > 255] = 255 image[image < 0] = 0 image = image.astype("uint8") return image
en
0.647248
Args: xml_path (str): path to xml file Return: obejct_ (list): location of the ground truth bounding boxes and corresponding labels # get the tree with the heading "name" Args: image (numpy) : input image (H,W,C) mean (list) : mean of each channel of the image (R,G,B) std (list) : std of each channel of the image (R,G,B) Return: normalized_image (numpy) : normalized image (H,W,C) Args: image (numpy array) : input image (H, W, C) object_ (list) : location of the ground truth bounding boxes and corresponding labels target_size (int) : target size to resize, we use 448x448 as default Return: resized_image (numpy array) : resized image (H, W, C) object_ (list) : location of the ground truth bounding boxes that are rescaled # xmin # xmax # ymin # ymax Args: image (numpy array): original image (H,W,C) crop_ratio (float): range from 0.9 to 1 Return: crop_location: x and y Crop the image: Args: image (numpy array) : numpy array (H,W,C) object (list): location of the bounding box and label (xmin, ymin, xmax, ymax, label) crop_size (tuple or list) : the desired crop size (W, H) crop_loc (tuple or list) : the desired location to crop (x, y) return: image_cropped (numpy array) : cropped image of specific demension (H,W,C) object_ (list) : location of the ground truth bounding boxes cropped # crop the image # crop the bounding box Args: image : numpy array of image option_value = random integer between 0 to 3 Return : image : numpy array of flipped image # vertical # horizontal # horizontally and vertically flip # no effect Args: image : numpy array of image alpha : α is a scaling factor sigma : σ is an elasticity coefficient random_state = random integer Return : : elastically transformed numpy array of image Args: image : numpy array of image mean : pixel mean of image standard deviation : pixel standard deviation of image Return : image : numpy array of image with gaussian noise added Args: image : numpy array of image low : lower boundary of output interval high : upper boundary of output interval Return : image : numpy array of image with uniform noise added Args: image : numpy array of image value : brightness Return : image : numpy array of image with brightness added Args: image : numpy array of image in datatype int16 Return : image : numpy array of image in datatype uint8 with ceilling(maximum 255) and flooring(minimum 0)
2.18374
2
buskin/utils.py
nuwandavek/buskin
0
6617625
<gh_stars>0 import re, string from torch.nn.functional import softmax import numpy as np from fuzzywuzzy import fuzz from buskin.config import MIN_PAR_SENTENCES, MIN_FUZZ_SCORE, STOP_WORDS, EMOTIONS, REDUCED_EMOTIONS from buskin.entities import Character, Emotion, Sentence, TokenTags, Occurrence def convert_text_to_chunks(text, max_chunk_size): # split on newlines followed by space pars = re.split(r"\n\s", text) # Replace newline chars pars = [par.replace("\n", " ") for par in pars] # Remove empty pars pars = [par for par in pars if len(par) > 0] # Preprocess "paragraphs" that are actually quotes or single lined text final_pars = [] for p, paragraph in enumerate(pars): if paragraph.count(".") < MIN_PAR_SENTENCES: if p == 0: final_pars.append(paragraph) else: final_pars[-1] = final_pars[-1] + " " + paragraph else: final_pars.append(paragraph) final_chunks = [""] chunk_id = 0 par_id = 0 while par_id < len(final_pars): if len(final_chunks[chunk_id]) > max_chunk_size: chunk_id += 1 final_chunks.append("") final_chunks[chunk_id] = final_chunks[chunk_id] + " " + final_pars[par_id] par_id += 1 final_chunks = [(chunk, ch) for ch, chunk in enumerate(final_chunks)] return final_chunks def get_merged_characters(coref_dicts, max_fuzz=MIN_FUZZ_SCORE): characters = [] main_coref = {} for dict_ in coref_dicts: for k, v in dict_.items(): if k in main_coref: main_coref[k]["mentions"] += v["mentions"] main_coref[k]["agents"] += v["agents"] main_coref[k]["patients"] += v["patients"] main_coref[k]["preds"] += v["preds"] else: main_coref[k] = v merged_coref = {} char_counts = {} for k, v in main_coref.items(): added = 0 for merged_char in merged_coref.keys(): if fuzz.ratio(merged_char, k) > max_fuzz: merged_coref[merged_char]["mentions"] += v["mentions"] merged_coref[merged_char]["agents"] += v["agents"] merged_coref[merged_char]["patients"] += v["patients"] merged_coref[merged_char]["preds"] += v["preds"] added = 1 char_counts[merged_char] += len(v["mentions"]) break if added == 0: merged_coref[k] = v char_counts[k] = len(v["mentions"]) char_counts = [[k, char_counts[k]] for k in char_counts] char_counts = sorted(char_counts, key=lambda x: x[1], reverse=True) ranked_chars = [x[0] for x in char_counts] for char in merged_coref: rank = ranked_chars.index(char) + 1 character = Character( rank, char, merged_coref[char]["mentions"], merged_coref[char]["agents"], merged_coref[char]["patients"], merged_coref[char]["preds"], ) characters.append(character) characters = sorted(characters, key=lambda x: x.rank) return characters def generate_sentence_batches(sentences, batch_size): i = 0 while i * batch_size < len(sentences): subset = sentences[i * batch_size : min((i + 1) * batch_size, len(sentences))] subset = [[t.token for t in s.token_tags] for s in subset] i += 1 yield subset def parse_into_sentences_characters(chunk, nlp=None): text, par_id = chunk doc = nlp(text) # parse into sentences sentences = [] sentence_id_for_tokens = [] for s, sent in enumerate(doc.sents): tokens = doc[sent.start : sent.end] sentence_id_for_tokens += [s] * len(tokens) token_tags = [ TokenTags( i, token.i, token.text, token.lemma_, token.pos_, token.pos_, token.dep_, token.head.i, ) for i, token in enumerate(tokens) ] emotion_tags = Emotion(None, None, None) sentences.append( Sentence(s, par_id, sent.start, sent.text, token_tags, emotion_tags) ) corefs = {} if doc._.has_coref: for cluster in doc._.coref_clusters: # If an entry for this coref doesn't yet exist, create one main_name = denoise_string(cluster.main.text) if main_name in STOP_WORDS or main_name == "STOP_WORD": continue if not (main_name in corefs): corefs[main_name] = { "mentions": [], "agents": [], "patients": [], "preds": [], } # Update the entry with new mention and any parsed verbs or predicatives for mention in cluster.mentions: mention_name = denoise_string(mention.text) mention_sent = sentence_id_for_tokens[mention.start] corefs[main_name]["mentions"].append( Occurrence( mention_name, mention_sent, par_id, mention.start, mention.end ) ) agents, patients, preds = parse_sent_and_mention( sentences[mention_sent], mention, par_id ) corefs[main_name]["agents"] += agents corefs[main_name]["patients"] += patients corefs[main_name]["preds"] += preds return sentences, corefs def denoise_string(s): exclude = set(string.punctuation) s = s.lower() s = ''.join(ch for ch in s if ch not in exclude).strip() s = ' '.join([x for x in s.split(' ') if x not in STOP_WORDS]) if s =='': s = 'STOP_WORD' return s def parse_sent_and_mention(sent, mention, par_id): agents = [] patients = [] predicatives = [] # Iterate over tokens in the mention for token in mention: token_tag = sent.token_tags[token.i - sent.global_token_start] # If the token's dependency tag is nsubj, find it's parent and set the lemma of this word to # be an agent of this entity. if token_tag.dep == 'nsubj': idx = token_tag.head_global_id - sent.global_token_start agent_verb = sent.token_tags[idx].lemma agents.append(Occurrence(agent_verb, sent.sentence_id, par_id, idx, idx+1)) #print(" mention: ", mention, " token: ", token, " id ", token.i, "agent : ", agent_verb) # If the token's dependency tag is dobj or nsubjpass, find it's parent and set the lemma of this word to # be an patient of this entity. if (token_tag.dep == 'dobj') or (token_tag.dep == 'nsubjpass'): idx = token_tag.head_global_id - sent.global_token_start patient_verb = sent.token_tags[idx].lemma patients.append(Occurrence(patient_verb, sent.sentence_id, par_id, idx, idx+1)) #print(" mention: ", mention, " token: ", token, " id ", token.i, "patient : ", patient_verb) # Now we handle dependencies in the other direction to get predicatives. # 'man' is the predicative of 'Tim' in the sentence "Tim is a man." # Iterate over sentence tokens for token_tag in sent.token_tags: # Only consider tokens not in the mention: if not ((token_tag.token_global_id >= mention.start) and (token_tag.token_global_id <= mention.end)): # ignore punctuation if token_tag.pos != 'PUNCT': # Check if the parent of the word is a "be" verb (is, are, be, etc.) if sent.token_tags[token_tag.head_global_id - sent.global_token_start].lemma == "be": to_be_verb = sent.token_tags[token_tag.head_global_id - sent.global_token_start] # Check if the parent of the "be" verb is part of the mention if (to_be_verb.head_global_id >= mention.start) and (to_be_verb.head_global_id <= mention.end): idx = token_tag.token_global_id - sent.global_token_start pred_word = sent.token_tags[idx].lemma predicatives.append(Occurrence(pred_word, sent.sentence_id, par_id, idx, idx+1)) #print(" mention: ", mention, " token: ", token, " id ", token_tag.token_global_id, "predicative : ", pred_word) return agents, patients, predicatives def get_emotion_per_batch(batch, tokenizer, model): inputs = tokenizer(batch, is_split_into_words=True, return_tensors='pt', padding=True).to('cuda') outputs = model(**inputs) logits = outputs.logits probs = softmax(logits, dim=1).cpu().data.numpy() emotion_res = [EMOTIONS[x] for x in np.argmax(probs, axis=1)] emo_prob = list(np.max(probs, axis=1)) mini_emotion_res = [REDUCED_EMOTIONS[emotion] for emotion in emotion_res] return (emotion_res, mini_emotion_res, emo_prob) def merge_emotions_to_sentences(sentences, emotion_batches): emotions = [] mini_emotions = [] probs = [] for e,m,p in emotion_batches: emotions+=e mini_emotions+=m probs+=p assert len(emotions) == len(mini_emotions) assert len(emotions) == len(probs) assert len(emotions) == len(sentences) for i in range(len(sentences)): sentences[i].emotion_tags = Emotion(emotions[i], mini_emotions[i], float(probs[i])) return sentences
import re, string from torch.nn.functional import softmax import numpy as np from fuzzywuzzy import fuzz from buskin.config import MIN_PAR_SENTENCES, MIN_FUZZ_SCORE, STOP_WORDS, EMOTIONS, REDUCED_EMOTIONS from buskin.entities import Character, Emotion, Sentence, TokenTags, Occurrence def convert_text_to_chunks(text, max_chunk_size): # split on newlines followed by space pars = re.split(r"\n\s", text) # Replace newline chars pars = [par.replace("\n", " ") for par in pars] # Remove empty pars pars = [par for par in pars if len(par) > 0] # Preprocess "paragraphs" that are actually quotes or single lined text final_pars = [] for p, paragraph in enumerate(pars): if paragraph.count(".") < MIN_PAR_SENTENCES: if p == 0: final_pars.append(paragraph) else: final_pars[-1] = final_pars[-1] + " " + paragraph else: final_pars.append(paragraph) final_chunks = [""] chunk_id = 0 par_id = 0 while par_id < len(final_pars): if len(final_chunks[chunk_id]) > max_chunk_size: chunk_id += 1 final_chunks.append("") final_chunks[chunk_id] = final_chunks[chunk_id] + " " + final_pars[par_id] par_id += 1 final_chunks = [(chunk, ch) for ch, chunk in enumerate(final_chunks)] return final_chunks def get_merged_characters(coref_dicts, max_fuzz=MIN_FUZZ_SCORE): characters = [] main_coref = {} for dict_ in coref_dicts: for k, v in dict_.items(): if k in main_coref: main_coref[k]["mentions"] += v["mentions"] main_coref[k]["agents"] += v["agents"] main_coref[k]["patients"] += v["patients"] main_coref[k]["preds"] += v["preds"] else: main_coref[k] = v merged_coref = {} char_counts = {} for k, v in main_coref.items(): added = 0 for merged_char in merged_coref.keys(): if fuzz.ratio(merged_char, k) > max_fuzz: merged_coref[merged_char]["mentions"] += v["mentions"] merged_coref[merged_char]["agents"] += v["agents"] merged_coref[merged_char]["patients"] += v["patients"] merged_coref[merged_char]["preds"] += v["preds"] added = 1 char_counts[merged_char] += len(v["mentions"]) break if added == 0: merged_coref[k] = v char_counts[k] = len(v["mentions"]) char_counts = [[k, char_counts[k]] for k in char_counts] char_counts = sorted(char_counts, key=lambda x: x[1], reverse=True) ranked_chars = [x[0] for x in char_counts] for char in merged_coref: rank = ranked_chars.index(char) + 1 character = Character( rank, char, merged_coref[char]["mentions"], merged_coref[char]["agents"], merged_coref[char]["patients"], merged_coref[char]["preds"], ) characters.append(character) characters = sorted(characters, key=lambda x: x.rank) return characters def generate_sentence_batches(sentences, batch_size): i = 0 while i * batch_size < len(sentences): subset = sentences[i * batch_size : min((i + 1) * batch_size, len(sentences))] subset = [[t.token for t in s.token_tags] for s in subset] i += 1 yield subset def parse_into_sentences_characters(chunk, nlp=None): text, par_id = chunk doc = nlp(text) # parse into sentences sentences = [] sentence_id_for_tokens = [] for s, sent in enumerate(doc.sents): tokens = doc[sent.start : sent.end] sentence_id_for_tokens += [s] * len(tokens) token_tags = [ TokenTags( i, token.i, token.text, token.lemma_, token.pos_, token.pos_, token.dep_, token.head.i, ) for i, token in enumerate(tokens) ] emotion_tags = Emotion(None, None, None) sentences.append( Sentence(s, par_id, sent.start, sent.text, token_tags, emotion_tags) ) corefs = {} if doc._.has_coref: for cluster in doc._.coref_clusters: # If an entry for this coref doesn't yet exist, create one main_name = denoise_string(cluster.main.text) if main_name in STOP_WORDS or main_name == "STOP_WORD": continue if not (main_name in corefs): corefs[main_name] = { "mentions": [], "agents": [], "patients": [], "preds": [], } # Update the entry with new mention and any parsed verbs or predicatives for mention in cluster.mentions: mention_name = denoise_string(mention.text) mention_sent = sentence_id_for_tokens[mention.start] corefs[main_name]["mentions"].append( Occurrence( mention_name, mention_sent, par_id, mention.start, mention.end ) ) agents, patients, preds = parse_sent_and_mention( sentences[mention_sent], mention, par_id ) corefs[main_name]["agents"] += agents corefs[main_name]["patients"] += patients corefs[main_name]["preds"] += preds return sentences, corefs def denoise_string(s): exclude = set(string.punctuation) s = s.lower() s = ''.join(ch for ch in s if ch not in exclude).strip() s = ' '.join([x for x in s.split(' ') if x not in STOP_WORDS]) if s =='': s = 'STOP_WORD' return s def parse_sent_and_mention(sent, mention, par_id): agents = [] patients = [] predicatives = [] # Iterate over tokens in the mention for token in mention: token_tag = sent.token_tags[token.i - sent.global_token_start] # If the token's dependency tag is nsubj, find it's parent and set the lemma of this word to # be an agent of this entity. if token_tag.dep == 'nsubj': idx = token_tag.head_global_id - sent.global_token_start agent_verb = sent.token_tags[idx].lemma agents.append(Occurrence(agent_verb, sent.sentence_id, par_id, idx, idx+1)) #print(" mention: ", mention, " token: ", token, " id ", token.i, "agent : ", agent_verb) # If the token's dependency tag is dobj or nsubjpass, find it's parent and set the lemma of this word to # be an patient of this entity. if (token_tag.dep == 'dobj') or (token_tag.dep == 'nsubjpass'): idx = token_tag.head_global_id - sent.global_token_start patient_verb = sent.token_tags[idx].lemma patients.append(Occurrence(patient_verb, sent.sentence_id, par_id, idx, idx+1)) #print(" mention: ", mention, " token: ", token, " id ", token.i, "patient : ", patient_verb) # Now we handle dependencies in the other direction to get predicatives. # 'man' is the predicative of 'Tim' in the sentence "Tim is a man." # Iterate over sentence tokens for token_tag in sent.token_tags: # Only consider tokens not in the mention: if not ((token_tag.token_global_id >= mention.start) and (token_tag.token_global_id <= mention.end)): # ignore punctuation if token_tag.pos != 'PUNCT': # Check if the parent of the word is a "be" verb (is, are, be, etc.) if sent.token_tags[token_tag.head_global_id - sent.global_token_start].lemma == "be": to_be_verb = sent.token_tags[token_tag.head_global_id - sent.global_token_start] # Check if the parent of the "be" verb is part of the mention if (to_be_verb.head_global_id >= mention.start) and (to_be_verb.head_global_id <= mention.end): idx = token_tag.token_global_id - sent.global_token_start pred_word = sent.token_tags[idx].lemma predicatives.append(Occurrence(pred_word, sent.sentence_id, par_id, idx, idx+1)) #print(" mention: ", mention, " token: ", token, " id ", token_tag.token_global_id, "predicative : ", pred_word) return agents, patients, predicatives def get_emotion_per_batch(batch, tokenizer, model): inputs = tokenizer(batch, is_split_into_words=True, return_tensors='pt', padding=True).to('cuda') outputs = model(**inputs) logits = outputs.logits probs = softmax(logits, dim=1).cpu().data.numpy() emotion_res = [EMOTIONS[x] for x in np.argmax(probs, axis=1)] emo_prob = list(np.max(probs, axis=1)) mini_emotion_res = [REDUCED_EMOTIONS[emotion] for emotion in emotion_res] return (emotion_res, mini_emotion_res, emo_prob) def merge_emotions_to_sentences(sentences, emotion_batches): emotions = [] mini_emotions = [] probs = [] for e,m,p in emotion_batches: emotions+=e mini_emotions+=m probs+=p assert len(emotions) == len(mini_emotions) assert len(emotions) == len(probs) assert len(emotions) == len(sentences) for i in range(len(sentences)): sentences[i].emotion_tags = Emotion(emotions[i], mini_emotions[i], float(probs[i])) return sentences
en
0.835646
# split on newlines followed by space # Replace newline chars # Remove empty pars # Preprocess "paragraphs" that are actually quotes or single lined text # parse into sentences # If an entry for this coref doesn't yet exist, create one # Update the entry with new mention and any parsed verbs or predicatives # Iterate over tokens in the mention # If the token's dependency tag is nsubj, find it's parent and set the lemma of this word to # be an agent of this entity. #print(" mention: ", mention, " token: ", token, " id ", token.i, "agent : ", agent_verb) # If the token's dependency tag is dobj or nsubjpass, find it's parent and set the lemma of this word to # be an patient of this entity. #print(" mention: ", mention, " token: ", token, " id ", token.i, "patient : ", patient_verb) # Now we handle dependencies in the other direction to get predicatives. # 'man' is the predicative of 'Tim' in the sentence "Tim is a man." # Iterate over sentence tokens # Only consider tokens not in the mention: # ignore punctuation # Check if the parent of the word is a "be" verb (is, are, be, etc.) # Check if the parent of the "be" verb is part of the mention #print(" mention: ", mention, " token: ", token, " id ", token_tag.token_global_id, "predicative : ", pred_word)
2.526266
3
bin/lib/Astro_Libraries/f2n.py
Delosari/dazer
0
6617626
''' Created on Jun 15, 2015 @author: vital ''' #! /usr/bin/env python """ f2n.py, the successor of f2n ! ============================== <NAME>, December 2009 U{http://obswww.unige.ch/~tewes/} This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. U{http://www.gnu.org/licenses/gpl.html} About ----- f2n.py is a tiny python module to make a well-scaled PNG file out of a FITS image. It's mainly a wrapper around pyfits + PIL. Aside of these two, we use only numpy. PIL : U{http://www.pythonware.com/products/pil/} pyfits : U{http://www.stsci.edu/resources/software_hardware/pyfits} Usage ----- You can use this python script both as a module or as an executable with command line options. See the website and the examples provided in the tarball ! To learn about the methods of the f2nimage class, click on I{f2n.f2nimage} in the left menu. Features -------- f2n.py let's you crop the input image, rebin it, choose cutoffs and log or lin scales, draw masks, "upsample" the pixels without interpolation, circle and annotate objects, write titles or longer strings, and even compose several of such images side by side into one single png file. For the location of pixels for drawing + labels, we work in "image" pixels, like ds9 and sextractor. This is true even when you have choosen to crop/rebin/upsample the image : you still specify all coordinates as pixels of the original input image ! By default we produce graylevel 8-bit pngs, for minimum file size. But you are free to use colours as well (thus writing 24 bit pngs). Order of operations that should be respected for maximum performance (and to avoid "features" ... ) : - fromfits (or call to constructor) - crop - setzscale (auto, ex, flat, or your own choice) - rebin - makepilimage (lin, log, clin, or clog) (the c stands for colours... "rainbow") - drawmask, showcutoffs - upsample - drawcircle, drawrectangle, writelabel, writeinfo, writetitle, drawstarsfile, drawstarslist - tonet (or compose) Ideas for future versions ------------------------- - variant of rebin() that rebins/upscales to approach a given size, like maxsize(500). """ import sys import os import types import copy as pythoncopy import numpy as np from PIL import Image from PIL import ImageOps as imop from PIL import ImageDraw as imdw from PIL import ImageFont as imft # import Image as im #This importing format is deprecated. We have updated it # import ImageOps as imop # import ImageDraw as imdw # import ImageFont as imft import pyfits as ft # - - - - - - - - - Where are my fonts ? - - - - - - - - - - - - # To use the fonts, put the directory containing them (whose name # can be changed below) somewhere in your python path, typically # aside of this f2n.py file ! # To learn about your python path : # >>> import sys # >>> print sys.path #MODIFIED TO BE ADJUSTED ACCORDING TO THE LOCATION OF THE FONTS... WHAT A BLOODY MESS fontsdir = "/home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts/" # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - __version__ = "1.1" # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - class f2nimage: def __init__(self,numpyarray=None, shape = (100,100), fill = 10000.0, verbose=True): """ Give me a numpyarray, or give me a shape (in this case I build my own array and fill it with the value fill). We will call the first coordinate "x", the second "y". The origin (0,0) is to be seen as the pixel in the lower left corner. When converting the numpy array to a png file, we will take care of the orientation in that way. """ self.verbose = verbose # Should I print infos about what I do ? self.z1 = -1000.0 self.z2 = 65000.0 self.binfactor = 1 # (int) We will keep this up to date, # to calculate correct drawing positions self.upsamplefactor = 1 # idem self.pilimage = None # For now, no PIL image. Some manipulations are to be done before. self.negative = False # Will be set to true if choosen in makepilimage. Changes default colours. # The draw object is created when needed, see makedraw() self.draw = None # The fonts are loaded when needed, see loadtitlefont() self.titlefont = None self.labelfont = None self.infofont = None # Now the numpy array to hold the actual data. if numpyarray == None: self.numpyarray = np.ones(shape, dtype=np.float32)*fill else: if not isinstance(numpyarray, np.ndarray): raise RuntimeError, "Please give me numpy arrays." if numpyarray.ndim != 2: raise RuntimeError, "Your array must be 2D." self.numpyarray = numpyarray.astype(np.float32) # We keep trace of any crops through these : self.xa = 0 self.ya = 0 self.xb = self.numpyarray.shape[0] self.yb = self.numpyarray.shape[1] self.origwidth = self.xb self.origheight = self.yb #self.cropwascalled = False # They will be updated by any crop method, so that we can always convert between # coordinates in the orinigal numpy array or fits image, and coordinates in the # rebinned cutout etc. # Copy method : does not work / not required. You can apply .copy() anyway, but only before # the PILimage is created. # # def copy(self): # """Returns a "deep copy" of the f2nimage object.""" # # return pythoncopy.deepcopy(self) def setzscale(self, z1="auto", z2="auto", nsig=3, samplesizelimit = 10000, border=300): """ We set z1 and z2, according to different algorithms or arguments. For both z1 and z2, give either : - "auto" (default automatic, different between z1 and z2) - "ex" (extrema) - "flat" ("sigma-cuts" around median value, well-suited for flatfields) - numeric value like 1230.34 nsig is the number of sigmas to be rejected (used by auto z1 + both flats) samplesizelimit is the maximum number of pixels to compute statistics on. If your image is larger then samplesizelimit, I will use only samplesizelimit pixels of it. If your image is 3 times border in width and height, I will skip border pixels around the image before doing calculations. This is made to get rid of the overscan and prescan etc. So you can basically leave this at 300, it will only affect images wider then 900 pixels. (300 happens to be a safe value for many telescopes.) You can put border = 0 to deactivate this feature. If you give nothing, the cutoff will not be changed. You should set the z scale directly after cropping the image. """ if self.pilimage != None: raise RuntimeError, "Cannot set z scale anymore, PIL image already exists !" if self.numpyarray.shape[0] > 3 * border and self.numpyarray.shape[1] > 3 * border: if border > 0: if self.verbose : print "For the stats I will leave a border of %i pixels" % border calcarray = self.numpyarray[border:-border, border:-border].copy() else: calcarray = self.numpyarray.copy() else: calcarray = self.numpyarray.copy() if self.verbose: print "Image is too small for a border of %i" % (border) # Starting with the simple possibilities : if z1 == "ex" : self.z1 = np.min(calcarray) if self.verbose: print "Setting ex z1 to %f" % self.z1 if z2 == "ex": self.z2 = np.max(calcarray) if self.verbose: print "Setting ex z2 to %f" % self.z2 if type(z1) == type(0) or type(z1) == type(0.0): self.z1 = z1 if self.verbose: print "Setting z1 to %f" % self.z1 if type(z2) == type(0) or type(z2) == type(0.0): self.z2 = z2 if self.verbose: print "Setting z2 to %f" % self.z2 # Now it gets a little more sophisticated. if z1 == "auto" or z2 == "auto" or z1 == "flat" or z2 == "flat": # To speed up, we do not want to do statistics on the full image if it is large. # So we prepare a small random sample of pixels. calcarray.shape = calcarray.size # We flatten the 2D array if calcarray.size > samplesizelimit : #selectionindices = np.random.random_integers(low = 0, high = calcarray.size - 1, size=samplesizelimit) selectionindices = np.linspace(0, calcarray.size-1, samplesizelimit).astype(np.int) statsel = calcarray[selectionindices] else : statsel = calcarray #nbrofbins = 10 + int(np.log10(calcarray.size)*10.0) #print "Building histogram with %i bins" % nbrofbins #nbrofbins = 100 #hist = np.histogram(statsel, bins=nbrofbins, range=(self.z1, self.z2), normed=False, weights=None, new=True) medianlevel = np.median(statsel) firststd = np.std(statsel) if z1 == "auto" : # 2 sigma clipping (quick and dirty star removal) : nearskypixvals = statsel[np.logical_and(statsel > medianlevel - 2*firststd, statsel < medianlevel + 2*firststd)] skylevel = np.median(nearskypixvals) secondstd = np.std(nearskypixvals) if self.verbose : print "Sky level at %f +/- %f" % (skylevel, secondstd) self.z1 = skylevel - nsig*secondstd if self.verbose : print "Setting auto z1 to %f, nsig = %i" % (self.z1, nsig) if z2 == "auto" : # Here we want to reject a percentage of high values... sortedstatsel = np.sort(statsel) n = round(0.9995 * statsel.size) self.z2 = sortedstatsel[n] if self.verbose : print "Setting auto z2 to %f" % self.z2 if z1 == "flat" : # 5 sigma clipping to get rid of cosmics : nearflatpixvals = statsel[np.logical_and(statsel > medianlevel - 5*firststd, statsel < medianlevel + 5*firststd)] flatlevel = np.median(nearflatpixvals) flatstd = np.std(nearflatpixvals) self.z1 = flatlevel - nsig*flatstd if self.verbose : print "Setting flat z1 : %f, nsig = %i" % (self.z1, nsig) if z2 == "flat" : # symmetric to z1 # 5 sigma clipping to get rid of cosmics : nearflatpixvals = statsel[np.logical_and(statsel > medianlevel - 5*firststd, statsel < medianlevel + 5*firststd)] flatlevel = np.median(nearflatpixvals) flatstd = np.std(nearflatpixvals) self.z2 = flatlevel + nsig*flatstd if self.verbose : print "Setting flat z2 : %f, nsig = %i" % (self.z2, nsig) def __str__(self): """ Returns a string with some info about the image. The details vary according to what you have done so far with the image. """ return_string = ["Numpy array shape : ", str(self.numpyarray.shape)] if self.xa != 0 or self.ya != 0 or self.xb != self.origwidth or self.yb != self.origheight: return_string.append("\nRegion : [%i:%i, %i:%i]" % (self.xa, self.xb, self.ya, self.yb)) return_string.extend([ "\nPixel type : %s" % str(self.numpyarray.dtype.name), "\nCutoffs : z1 = %f, z2=%f" % (self.z1,self.z2) ]) if self.pilimage != None: return_string.extend([ "\nPIL image mode : %s" % str(self.pilimage.mode), "\nPIL image shape : (%i, %i)" % (self.pilimage.size[0], self.pilimage.size[1]) ]) return ''.join(return_string) def crop(self, xa, xb, ya, yb): """ Crops the image. Two points : - We use numpy conventions xa = 200 and xb = 400 will give you a width of 200 pixels ! - We crop relative to the current array (i.e. not necessarily to the original array !) This means you can crop several times in a row with xa = 10, it will each time remove 10 pixels in x ! But we update the crop region specifications, so that the object remembers how it was cut. Please give positive integers in compatible ranges, no checks are made. """ if self.pilimage != None: raise RuntimeError, "Cannot crop anymore, PIL image already exists !" if self.verbose: print "Cropping : [%i:%i, %i:%i]" % (xa, xb, ya, yb) self.numpyarray = self.numpyarray[xa:xb, ya:yb] self.xa += xa self.ya += ya self.xb = self.xa + (xb - xa) self.yb = self.ya + (yb - ya) #if self.verbose: # print "Region is now : [%i:%i, %i:%i]" % (self.xa, self.xb, self.ya, self.yb) def irafcrop(self, irafcropstring): """ This is a wrapper around crop(), similar to iraf imcopy, using iraf conventions (100:199 will be 100 pixels, not 99). """ irafcropstring = irafcropstring[1:-1] # removing the [ ] ranges = irafcropstring.split(",") xr = ranges[0].split(":") yr = ranges[1].split(":") xmin = int(xr[0]) xmax = int(xr[1])+1 ymin = int(yr[0]) ymax = int(yr[1])+1 self.crop(xmin, xmax, ymin, ymax) def rebin(self, factor): """ I robustly rebin your image by a given factor. You simply specify a factor, and I will eventually take care of a crop to bring the image to interger-multiple-of-your-factor dimensions. Note that if you crop your image before, you must directly crop to compatible dimensions ! We update the binfactor, this allows you to draw on the image later, still using the orignial pixel coordinates. Here we work on the numpy array. """ if self.pilimage != None: raise RuntimeError, "Cannot rebin anymore, PIL image already exists !" if type(factor) != type(0): raise RuntimeError, "Rebin factor must be an integer !" if factor < 1: return origshape = np.asarray(self.numpyarray.shape) neededshape = origshape - (origshape % factor) if not (origshape == neededshape).all(): if self.verbose : print "Rebinning %ix%i : I have to crop from %s to %s" % (factor, factor, origshape, neededshape) self.crop(0, neededshape[0], 0, neededshape[1]) else: if self.verbose : print "Rebinning %ix%i : I do not need to crop" % (factor, factor) self.numpyarray = rebin(self.numpyarray, neededshape/factor) # we call the rebin function defined below # The integer division neededshape/factor is ok, we checked for this above. self.binfactor = int(self.binfactor * factor) def makepilimage(self, scale = "log", negative = False): """ Makes a PIL image out of the array, respecting the z1 and z2 cutoffs. By default we use a log scaling identical to iraf's, and produce an image of mode "L", i.e. grayscale. But some drawings or colourscales will change the mode to "RGB" later, if you choose your own colours. If you choose scale = "clog" or "clin", you get hue values (aka rainbow colours). """ if scale == "log" or scale == "lin": self.negative = negative numpyarrayshape = self.numpyarray.shape calcarray = self.numpyarray.copy() #calcarray.ravel() # does not change in place in fact ! calcarray = calcarray.clip(min = self.z1, max = self.z2) if scale == "log": calcarray = np.array(map(lambda x: loggray(x, self.z1, self.z2), calcarray)) else : calcarray = np.array(map(lambda x: lingray(x, self.z1, self.z2), calcarray)) calcarray.shape = numpyarrayshape bwarray = np.zeros(numpyarrayshape, dtype=np.uint8) calcarray.round(out=bwarray) if negative: if self.verbose: print "Using negative scale" bwarray = 255 - bwarray if self.verbose: print "PIL range : [%i, %i]" % (np.min(bwarray), np.max(bwarray)) # We flip it so that (0, 0) is back in the bottom left corner as in ds9 # We do this here, so that you can write on the image from left to right :-) self.pilimage = imop.flip(im.fromarray(bwarray.transpose())) if self.verbose: print "PIL image made with scale : %s" % scale return 0 if scale == "clog" or scale == "clin": """ rainbow ! Algorithm for HSV to RGB from http://www.cs.rit.edu/~ncs/color/t_convert.html, by <NAME> Same stuff then for f2n in C h is from 0 to 360 (hue) s from 0 to 1 (saturation) v from 0 to 1 (brightness) """ self.negative = False calcarray = self.numpyarray.transpose() if scale == "clin": calcarray = (calcarray.clip(min = self.z1, max = self.z2)-self.z1)/(self.z2 - self.z1) # 0 to 1 if scale == "clog": calcarray = 10.0 + 990.0 * (calcarray.clip(min = self.z1, max = self.z2)-self.z1)/(self.z2 - self.z1) # 10 to 1000 calcarray = (np.log10(calcarray)-1.0)*0.5 # 0 to 1 #calcarray = calcarray * 359.0 # This is now our "hue value", 0 to 360 calcarray = (1.0-calcarray) * 300.0 # I limit this to not go into red again # The order of colours is Violet < Blue < Green < Yellow < Red # We prepare the output arrays rcalcarray = np.ones(calcarray.shape) gcalcarray = np.ones(calcarray.shape) bcalcarray = np.ones(calcarray.shape) h = calcarray/60.0 # sector 0 to 5 i = np.floor(h).astype(np.int) v = 1.0 * np.ones(calcarray.shape) s = 1.0 * np.ones(calcarray.shape) f = h - i # factorial part of h, this is an array p = v * ( 1.0 - s ) q = v * ( 1.0 - s * f ) t = v * ( 1.0 - s * ( 1.0 - f ) ) # sector 0: indices = (i == 0) rcalcarray[indices] = 255.0 * v[indices] gcalcarray[indices] = 255.0 * t[indices] bcalcarray[indices] = 255.0 * p[indices] # sector 1: indices = (i == 1) rcalcarray[indices] = 255.0 * q[indices] gcalcarray[indices] = 255.0 * v[indices] bcalcarray[indices] = 255.0 * p[indices] # sector 2: indices = (i == 2) rcalcarray[indices] = 255.0 * p[indices] gcalcarray[indices] = 255.0 * v[indices] bcalcarray[indices] = 255.0 * t[indices] # sector 3: indices = (i == 3) rcalcarray[indices] = 255.0 * p[indices] gcalcarray[indices] = 255.0 * q[indices] bcalcarray[indices] = 255.0 * v[indices] # sector 4: indices = (i == 4) rcalcarray[indices] = 255.0 * t[indices] gcalcarray[indices] = 255.0 * p[indices] bcalcarray[indices] = 255.0 * v[indices] # sector 5: indices = (i == 5) rcalcarray[indices] = 255.0 * v[indices] gcalcarray[indices] = 255.0 * p[indices] bcalcarray[indices] = 255.0 * q[indices] rarray = np.zeros(calcarray.shape, dtype=np.uint8) garray = np.zeros(calcarray.shape, dtype=np.uint8) barray = np.zeros(calcarray.shape, dtype=np.uint8) rcalcarray.round(out=rarray) gcalcarray.round(out=garray) bcalcarray.round(out=barray) carray = np.dstack((rarray,garray,barray)) self.pilimage = imop.flip(im.fromarray(carray, "RGB")) if self.verbose: print "PIL image made with scale : %s" % scale return 0 raise RuntimeError, "I don't know your colourscale, choose lin log clin or clog !" def drawmask(self, maskarray, colour = 128): """ I draw a mask on the image. Give me a numpy "maskarray" of same size as mine, and I draw on the pilimage all pixels of the maskarray that are True in the maskcolour. By default, colour is gray, to avoid switching to RGB. But if you give for instance (255, 0, 0), I will do the switch. """ self.checkforpilimage() self.changecolourmode(colour) self.makedraw() # Checking size of maskarray : if maskarray.shape[0] != self.pilimage.size[0] or maskarray.shape[1] != self.pilimage.size[1]: raise RuntimeError, "Mask and image must have the same size !" # We make an "L" mode image out of the mask : tmparray = np.zeros(maskarray.shape, dtype=np.uint8) tmparray[maskarray] = 255 maskpil = imop.flip(im.fromarray(tmparray.transpose())) # We make a plain colour image : if type(colour) == type(0) : plainpil = im.new("L", self.pilimage.size, colour) else : plainpil = im.new("RGB", self.pilimage.size, colour) # We switch self to RGB if needed : self.changecolourmode(colour) # And now use the function composite to "blend" our image with the plain colour image : self.pilimage = im.composite(plainpil, self.pilimage, maskpil) # As we have changed the image object, we have to rebuild the draw object : self.draw = None def showcutoffs(self, redblue = False): """ We use drawmask to visualize pixels above and below the z cutoffs. By default this is done in black (above) and white (below) (and adapts to negative images). But if you choose redblue = True, I use red for above z2 and blue for below z1. """ highmask = self.numpyarray > self.z2 lowmask = self.numpyarray < self.z1 if redblue == False : if self.negative : self.drawmask(highmask, colour = 255) self.drawmask(lowmask, colour = 0) else : self.drawmask(highmask, colour = 0) self.drawmask(lowmask, colour = 255) else : self.drawmask(highmask, colour = (255, 0, 0)) self.drawmask(lowmask, colour = (0, 0, 255)) def checkforpilimage(self): """Auxiliary method to check if the PIL image was already made.""" if self.pilimage == None: raise RuntimeError, "No PIL image : call makepilimage first !" def makedraw(self): """Auxiliary method to make a draw object if not yet done. This is also called by changecolourmode, when we go from L to RGB, to get a new draw object. """ if self.draw == None: self.draw = imdw.Draw(self.pilimage) def defaultcolour(self, colour): """ Auxiliary method to choose a default colour. Give me a user provided colour : if it is None, I change it to the default colour, respecting negative. Plus, if the image is in RGB mode and you give me 128 for a gray, I translate this to the expected (128, 128, 128) ... """ if colour == None: if self.negative == True: if self.pilimage.mode == "L" : return 0 else : return (0, 0, 0) else : if self.pilimage.mode == "L" : return 255 else : return (255, 255, 255) else : if self.pilimage.mode == "RGB" and type(colour) == type(0): return (colour, colour, colour) else : return colour def loadtitlefont(self): """Auxiliary method to load font if not yet done.""" if self.titlefont == None: # print 'the bloody fonts dir is????', fontsdir # print 'pero esto que hace??', os.path.join(fontsdir, "courR18.pil") # /home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts/f2n_fonts/courR18.pil # /home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts self.titlefont = imft.load_path(os.path.join(fontsdir, "courR18.pil")) def loadinfofont(self): """Auxiliary method to load font if not yet done.""" if self.infofont == None: self.infofont = imft.load_path(os.path.join(fontsdir, "courR10.pil")) def loadlabelfont(self): """Auxiliary method to load font if not yet done.""" if self.labelfont == None: self.labelfont = imft.load_path(os.path.join(fontsdir, "courR10.pil")) def changecolourmode(self, newcolour): """Auxiliary method to change the colour mode. Give me a colour (either an int, or a 3-tuple, values 0 to 255) and I decide if the image mode has to be switched from "L" to "RGB". """ if type(newcolour) != type(0) and self.pilimage.mode != "RGB": if self.verbose : print "Switching to RGB !" self.pilimage = self.pilimage.convert("RGB") self.draw = None # important, we have to bebuild the draw object. self.makedraw() def upsample(self, factor): """ The inverse operation of rebin, applied on the PIL image. Do this before writing text or drawing on the image ! The coordinates will be automatically converted for you """ self.checkforpilimage() if type(factor) != type(0): raise RuntimeError, "Upsample factor must be an integer !" if self.verbose: print "Upsampling by a factor of %i" % factor self.pilimage = self.pilimage.resize((self.pilimage.size[0] * factor, self.pilimage.size[1] * factor)) self.upsamplefactor = factor self.draw = None def pilcoords(self, (x,y)): """ Converts the coordinates (x,y) of the original array or FITS file to the current coordinates of the PIL image, respecting cropping, rebinning, and upsampling. This is only used once the PIL image is available, for drawing. Note that we also have to take care about the different origin conventions here ! For PIL, (0,0) is top left, so the y axis needs to be inverted. """ pilx = int((x - 1 - self.xa) * float(self.upsamplefactor) / float(self.binfactor)) pily = int((self.yb - y) * float(self.upsamplefactor) / float(self.binfactor)) return (pilx, pily) def pilscale(self, r): """ Converts a "scale" (like an aperture radius) of the original array or FITS file to the current PIL coordinates. """ return r * float(self.upsamplefactor) / float(self.binfactor) def drawpoint(self, x, y, colour = None): """ Most elementary drawing, single pixel, used mainly for testing purposes. Coordinates are those of your initial image ! """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() (pilx, pily) = self.pilcoords((x,y)) self.draw.point((pilx, pily), fill = colour) def drawcircle(self, x, y, r = 10, colour = None, label = None): """ Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image ! You give these x and y in the usual ds9 pixels, (0,0) is bottom left. I will convert this into the right PIL coordiates. """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() (pilx, pily) = self.pilcoords((x,y)) pilr = self.pilscale(r) self.draw.ellipse([(pilx-pilr+1, pily-pilr+1), (pilx+pilr+1, pily+pilr+1)], outline = colour) if label != None: # The we write it : self.loadlabelfont() textwidth = self.draw.textsize(label, font = self.labelfont)[0] self.draw.text((pilx - float(textwidth)/2.0 + 2, pily + pilr + 4), label, fill = colour, font = self.labelfont) def drawrectangle(self, xa, xb, ya, yb, colour=None, label = None): """ Draws a 1-pixel wide frame AROUND the region you specify. Same convention as for crop(). """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() (pilxa, pilya) = self.pilcoords((xa,ya)) (pilxb, pilyb) = self.pilcoords((xb,yb)) self.draw.rectangle([(pilxa, pilyb-1), (pilxb+1, pilya)], outline = colour) if label != None: # The we write it : self.loadlabelfont() textwidth = self.draw.textsize(label, font = self.labelfont)[0] self.draw.text(((pilxa + pilxb)/2.0 - float(textwidth)/2.0 + 1, pilya + 2), label, fill = colour, font = self.labelfont) # Replaced by the label options above : # # def writelabel(self, x, y, string, r = 10, colour = None): # """ # Made to put a label below of the circle. We use the radius to adapt the distance. # (So the coordinates (x,y) are those of the circle center...) # If you do not care about circles, put r = 0 and I will center the text at your coordinates. # """ # # self.checkforpilimage() # colour = self.defaultcolour(colour) # self.changecolourmode(colour) # self.makedraw() # # # # Similar to drawcircle, but we shift the label a bit above and right... # (pilx, pily) = self.pilcoords((x,y)) # pilr = self.pilscale(r) # # self.loadlabelfont() # textwidth = self.draw.textsize(string, font = self.labelfont)[0] # self.draw.text((pilx - float(textwidth)/2.0 + 1, pily + pilr + 1), string, fill = colour, font = self.labelfont) # def writetitle(self, titlestring, colour = None): """ We write a title, centered below the image. """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() self.loadtitlefont() imgwidth = self.pilimage.size[0] imgheight = self.pilimage.size[1] textwidth = self.draw.textsize(titlestring, font = self.titlefont)[0] textxpos = imgwidth/2.0 - textwidth/2.0 textypos = imgheight - 30 self.draw.text((textxpos, textypos), titlestring, fill = colour, font = self.titlefont) if self.verbose : print "I've written a title on the image." def writeinfo(self, linelist, colour = None): """ We add a longer chunk of text on the upper left corner of the image. Provide linelist, a list of strings that will be written one below the other. """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() self.loadinfofont() for i, line in enumerate(linelist): topspacing = 5 + (12 + 5)*i self.draw.text((10, topspacing), line, fill = colour, font = self.infofont) if self.verbose : print "I've written some info on the image." def drawstarslist(self, dictlist, r = 10, colour = None): """ Calls drawcircle and writelable for an list of stars. Provide a list of dictionnaries, where each dictionnary contains "name", "x", and "y". """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() for star in dictlist: self.drawcircle(star["x"], star["y"], r = r, colour = colour, label = star["name"]) #self.writelabel(star["x"], star["y"], star["name"], r = r, colour = colour) if self.verbose : print "I've drawn %i stars." % len(dictlist) def drawstarsfile(self, filename, r = 10, colour = None): """ Same as drawstarlist but we read the stars from a file. Here we read a text file of hand picked stars. Same format as for cosmouline, that is : # comment starA 23.4 45.6 [other stuff...] Then we pass this to drawstarlist, """ if not os.path.isfile(filename): print "File does not exist :" print filename print "Line format to write : name x y [other stuff ...]" raise RuntimeError, "Cannot read star catalog." catfile = open(filename, "r") lines = catfile.readlines() catfile.close dictlist=[] # We will append dicts here. for i, line in enumerate(lines): if line[0] == '#' or len(line) < 4: continue elements = line.split() nbelements = len(elements) if nbelements < 3: print "Format error on line", i+1, "of :" print filename print "We want : name x y [other stuff ...]" raise RuntimeError, "Cannot read star catalog." name = elements[0] x = float(elements[1]) y = float(elements[2]) dictlist.append({"name":name, "x":x, "y":y}) if self.verbose : print "I've read %i stars from :" print os.path.split(filename)[1] self.drawstarslist(dictlist, r = r, colour = colour) def tonet(self, outfile): """ Writes the PIL image into a png. We do not want to flip the image at this stage, as you might have written on it ! """ self.checkforpilimage() if self.verbose : print "Writing image to %s...\n%i x %i pixels, mode %s" % (outfile, self.pilimage.size[0], self.pilimage.size[1], self.pilimage.mode) self.pilimage.save(outfile, "PNG") def lingray(x, a, b): """Auxiliary function that specifies the linear gray scale. a and b are the cutoffs.""" return 255 * (x-float(a))/(b-a) def loggray(x, a, b): """Auxiliary function that specifies the logarithmic gray scale. a and b are the cutoffs.""" linval = 10.0 + 990.0 * (x-float(a))/(b-a) return (np.log10(linval)-1.0)*0.5 * 255.0 def fromfits(infile, hdu = 0, verbose = True): """ Factory function that reads a FITS file and returns a f2nimage object. Use hdu to specify which HDU you want (primary = 0) """ pixelarray, hdr = ft.getdata(infile, hdu, header=True) pixelarray = np.asarray(pixelarray).transpose() #print pixelarray pixelarrayshape = pixelarray.shape if verbose : print "Input shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1]) print "Input file BITPIX : %s" % (hdr["BITPIX"]) pixelarrayshape = np.asarray(pixelarrayshape) if verbose : print "Internal array type :", pixelarray.dtype.name return f2nimage(pixelarray, verbose = verbose) def rebin(a, newshape): """ Auxiliary function to rebin ndarray data. Source : http://www.scipy.org/Cookbook/Rebinning example usage: >>> a=rand(6,4); b=rebin(a,(3,2)) """ shape = a.shape lenShape = len(shape) factor = np.asarray(shape)/np.asarray(newshape) #print factor evList = ['a.reshape('] + \ ['newshape[%d],factor[%d],'%(i,i) for i in xrange(lenShape)] + \ [')'] + ['.sum(%d)'%(i+1) for i in xrange(lenShape)] + \ ['/factor[%d]'%i for i in xrange(lenShape)] return eval(''.join(evList)) def compose(f2nimages, outfile): """ Takes f2nimages and writes them into one single png file, side by side. f2nimages is a list of horizontal lines, where each line is a list of f2nimages. For instance : [ [image1, image2], [image3, image4] ] The sizes of these images have to "match", so that the final result is rectangular. This function is verbose if any of the images is verbose. """ # We start by doing some checks, and try to print out helpfull error messages. verbosity = [] colourmodes = [] for i, line in enumerate(f2nimages): for j, img in enumerate(line): if img.verbose: print "Checking line %i, image %i (verbose)..." % (i+1, j+1) img.checkforpilimage() verbosity.append(img.verbose) colourmodes.append(img.pilimage.mode) verbose = np.any(np.array(verbosity)) # So we set the verbosity used in this function to true if any of the images is verbose. colours = list(set(colourmodes)) # We check if the widths are compatible : widths = [np.sum(np.array([img.pilimage.size[0] for img in line])) for line in f2nimages] if len(set(widths)) != 1 : print "Total widths of the lines :" print widths raise RuntimeError, "The total widths of your lines are not compatible !" totwidth = widths[0] # Similar for the heights : for i, line in enumerate(f2nimages): heights = [img.pilimage.size[1] for img in line] if len(set(heights)) != 1 : print "Heights of the images in line %i :" % (i + 1) print heights raise RuntimeError, "Heights of the images in line %i are not compatible." % (i + 1) totheight = np.sum(np.array([line[0].pilimage.size[1] for line in f2nimages])) # Ok, now it should be safe to go for the composition : if verbose: print "Composition size : %i x %i" % (totwidth, totheight) if verbose: print "Colour modes of input : %s" % colours if len(colours) == 1 and colours[0] == "L" : if verbose : print "Builing graylevel composition" compoimg = im.new("L", (totwidth, totheight), 128) else: if verbose : print "Building RGB composition" compoimg = im.new("RGB", (totwidth, totheight), (255, 0, 0)) y = 0 for line in f2nimages: x = 0 for img in line: box = (x, y, x+img.pilimage.size[0], y+img.pilimage.size[1]) #print box compoimg.paste(img.pilimage, box) x += img.pilimage.size[0] y += img.pilimage.size[1] if verbose: print "Writing compositions to %s...\n%i x %i pixels, mode %s" % (outfile, compoimg.size[0], compoimg.size[1], compoimg.mode) compoimg.save(outfile, "PNG") def isnumeric(value): """ "0.2355" would return True. A little auxiliary function for command line parsing. """ return str(value).replace(".", "").replace("-", "").isdigit() #if __name__ == "__main__": # """When the script is called and not imported, we simply pass the command line arguments to the fitstopng function.""" # # #from optparse import OptionParser # #parser = OptionParser() # #(options, args) = parser.parse_args() # #print args # # usagemessage = """ #usage : #python f2n.py in.fits out.png rebinning # #where : #- rebinning : 0 to not rebin, 2 to rebin 2x2, 3 to rebin 3x3 ... # #""" # # if len(sys.argv) < 4: # print usagemessage # sys.exit() # # #for arg in sys.argv: # # print arg # # fitstopng(sys.argv[1], sys.argv[2], int(sys.argv[3])) # if __name__ == "__main__": """ We read command-line-arguments when the script is called and not imported. With this we can perform some standart operations, immitating the old f2n in C. There is only support for the most basic stuff in this mode. Yes, I know that I should use getopt instead of argv ... """ args = sys.argv[1:] if len(args) != 4 and len(args) != 6: print """ usage : python f2n.py in.fits 1 log out.png [auto auto] The 1 is the binning factor Put a negative value and you get upsampled Then, log can be one of log, lin, clog or clin You can optionally add z1 and z2 cutoffs : auto, ex, flat, or 12.3 Enjoy ! """ sys.exit(1) else: arg_input = str(args[0]) arg_rebin = int(args[1]) arg_upsample = 1 if arg_rebin < 0: arg_upsample = - arg_rebin arg_rebin = 1 arg_scale = str(args[2]) arg_output = str(args[3]) if len(args) == 6: arg_z1 = args[4] arg_z2 = args[5] if isnumeric(arg_z1): arg_z1 = float(arg_z1) if isnumeric(arg_z2): arg_z2 = float(arg_z2) else: arg_z1 = "auto" arg_z2 = "auto" myimage = fromfits(arg_input) myimage.setzscale(arg_z1, arg_z2) myimage.rebin(arg_rebin) myimage.makepilimage(arg_scale) myimage.upsample(arg_upsample) myimage.tonet(arg_output)
''' Created on Jun 15, 2015 @author: vital ''' #! /usr/bin/env python """ f2n.py, the successor of f2n ! ============================== <NAME>, December 2009 U{http://obswww.unige.ch/~tewes/} This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. U{http://www.gnu.org/licenses/gpl.html} About ----- f2n.py is a tiny python module to make a well-scaled PNG file out of a FITS image. It's mainly a wrapper around pyfits + PIL. Aside of these two, we use only numpy. PIL : U{http://www.pythonware.com/products/pil/} pyfits : U{http://www.stsci.edu/resources/software_hardware/pyfits} Usage ----- You can use this python script both as a module or as an executable with command line options. See the website and the examples provided in the tarball ! To learn about the methods of the f2nimage class, click on I{f2n.f2nimage} in the left menu. Features -------- f2n.py let's you crop the input image, rebin it, choose cutoffs and log or lin scales, draw masks, "upsample" the pixels without interpolation, circle and annotate objects, write titles or longer strings, and even compose several of such images side by side into one single png file. For the location of pixels for drawing + labels, we work in "image" pixels, like ds9 and sextractor. This is true even when you have choosen to crop/rebin/upsample the image : you still specify all coordinates as pixels of the original input image ! By default we produce graylevel 8-bit pngs, for minimum file size. But you are free to use colours as well (thus writing 24 bit pngs). Order of operations that should be respected for maximum performance (and to avoid "features" ... ) : - fromfits (or call to constructor) - crop - setzscale (auto, ex, flat, or your own choice) - rebin - makepilimage (lin, log, clin, or clog) (the c stands for colours... "rainbow") - drawmask, showcutoffs - upsample - drawcircle, drawrectangle, writelabel, writeinfo, writetitle, drawstarsfile, drawstarslist - tonet (or compose) Ideas for future versions ------------------------- - variant of rebin() that rebins/upscales to approach a given size, like maxsize(500). """ import sys import os import types import copy as pythoncopy import numpy as np from PIL import Image from PIL import ImageOps as imop from PIL import ImageDraw as imdw from PIL import ImageFont as imft # import Image as im #This importing format is deprecated. We have updated it # import ImageOps as imop # import ImageDraw as imdw # import ImageFont as imft import pyfits as ft # - - - - - - - - - Where are my fonts ? - - - - - - - - - - - - # To use the fonts, put the directory containing them (whose name # can be changed below) somewhere in your python path, typically # aside of this f2n.py file ! # To learn about your python path : # >>> import sys # >>> print sys.path #MODIFIED TO BE ADJUSTED ACCORDING TO THE LOCATION OF THE FONTS... WHAT A BLOODY MESS fontsdir = "/home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts/" # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - __version__ = "1.1" # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - class f2nimage: def __init__(self,numpyarray=None, shape = (100,100), fill = 10000.0, verbose=True): """ Give me a numpyarray, or give me a shape (in this case I build my own array and fill it with the value fill). We will call the first coordinate "x", the second "y". The origin (0,0) is to be seen as the pixel in the lower left corner. When converting the numpy array to a png file, we will take care of the orientation in that way. """ self.verbose = verbose # Should I print infos about what I do ? self.z1 = -1000.0 self.z2 = 65000.0 self.binfactor = 1 # (int) We will keep this up to date, # to calculate correct drawing positions self.upsamplefactor = 1 # idem self.pilimage = None # For now, no PIL image. Some manipulations are to be done before. self.negative = False # Will be set to true if choosen in makepilimage. Changes default colours. # The draw object is created when needed, see makedraw() self.draw = None # The fonts are loaded when needed, see loadtitlefont() self.titlefont = None self.labelfont = None self.infofont = None # Now the numpy array to hold the actual data. if numpyarray == None: self.numpyarray = np.ones(shape, dtype=np.float32)*fill else: if not isinstance(numpyarray, np.ndarray): raise RuntimeError, "Please give me numpy arrays." if numpyarray.ndim != 2: raise RuntimeError, "Your array must be 2D." self.numpyarray = numpyarray.astype(np.float32) # We keep trace of any crops through these : self.xa = 0 self.ya = 0 self.xb = self.numpyarray.shape[0] self.yb = self.numpyarray.shape[1] self.origwidth = self.xb self.origheight = self.yb #self.cropwascalled = False # They will be updated by any crop method, so that we can always convert between # coordinates in the orinigal numpy array or fits image, and coordinates in the # rebinned cutout etc. # Copy method : does not work / not required. You can apply .copy() anyway, but only before # the PILimage is created. # # def copy(self): # """Returns a "deep copy" of the f2nimage object.""" # # return pythoncopy.deepcopy(self) def setzscale(self, z1="auto", z2="auto", nsig=3, samplesizelimit = 10000, border=300): """ We set z1 and z2, according to different algorithms or arguments. For both z1 and z2, give either : - "auto" (default automatic, different between z1 and z2) - "ex" (extrema) - "flat" ("sigma-cuts" around median value, well-suited for flatfields) - numeric value like 1230.34 nsig is the number of sigmas to be rejected (used by auto z1 + both flats) samplesizelimit is the maximum number of pixels to compute statistics on. If your image is larger then samplesizelimit, I will use only samplesizelimit pixels of it. If your image is 3 times border in width and height, I will skip border pixels around the image before doing calculations. This is made to get rid of the overscan and prescan etc. So you can basically leave this at 300, it will only affect images wider then 900 pixels. (300 happens to be a safe value for many telescopes.) You can put border = 0 to deactivate this feature. If you give nothing, the cutoff will not be changed. You should set the z scale directly after cropping the image. """ if self.pilimage != None: raise RuntimeError, "Cannot set z scale anymore, PIL image already exists !" if self.numpyarray.shape[0] > 3 * border and self.numpyarray.shape[1] > 3 * border: if border > 0: if self.verbose : print "For the stats I will leave a border of %i pixels" % border calcarray = self.numpyarray[border:-border, border:-border].copy() else: calcarray = self.numpyarray.copy() else: calcarray = self.numpyarray.copy() if self.verbose: print "Image is too small for a border of %i" % (border) # Starting with the simple possibilities : if z1 == "ex" : self.z1 = np.min(calcarray) if self.verbose: print "Setting ex z1 to %f" % self.z1 if z2 == "ex": self.z2 = np.max(calcarray) if self.verbose: print "Setting ex z2 to %f" % self.z2 if type(z1) == type(0) or type(z1) == type(0.0): self.z1 = z1 if self.verbose: print "Setting z1 to %f" % self.z1 if type(z2) == type(0) or type(z2) == type(0.0): self.z2 = z2 if self.verbose: print "Setting z2 to %f" % self.z2 # Now it gets a little more sophisticated. if z1 == "auto" or z2 == "auto" or z1 == "flat" or z2 == "flat": # To speed up, we do not want to do statistics on the full image if it is large. # So we prepare a small random sample of pixels. calcarray.shape = calcarray.size # We flatten the 2D array if calcarray.size > samplesizelimit : #selectionindices = np.random.random_integers(low = 0, high = calcarray.size - 1, size=samplesizelimit) selectionindices = np.linspace(0, calcarray.size-1, samplesizelimit).astype(np.int) statsel = calcarray[selectionindices] else : statsel = calcarray #nbrofbins = 10 + int(np.log10(calcarray.size)*10.0) #print "Building histogram with %i bins" % nbrofbins #nbrofbins = 100 #hist = np.histogram(statsel, bins=nbrofbins, range=(self.z1, self.z2), normed=False, weights=None, new=True) medianlevel = np.median(statsel) firststd = np.std(statsel) if z1 == "auto" : # 2 sigma clipping (quick and dirty star removal) : nearskypixvals = statsel[np.logical_and(statsel > medianlevel - 2*firststd, statsel < medianlevel + 2*firststd)] skylevel = np.median(nearskypixvals) secondstd = np.std(nearskypixvals) if self.verbose : print "Sky level at %f +/- %f" % (skylevel, secondstd) self.z1 = skylevel - nsig*secondstd if self.verbose : print "Setting auto z1 to %f, nsig = %i" % (self.z1, nsig) if z2 == "auto" : # Here we want to reject a percentage of high values... sortedstatsel = np.sort(statsel) n = round(0.9995 * statsel.size) self.z2 = sortedstatsel[n] if self.verbose : print "Setting auto z2 to %f" % self.z2 if z1 == "flat" : # 5 sigma clipping to get rid of cosmics : nearflatpixvals = statsel[np.logical_and(statsel > medianlevel - 5*firststd, statsel < medianlevel + 5*firststd)] flatlevel = np.median(nearflatpixvals) flatstd = np.std(nearflatpixvals) self.z1 = flatlevel - nsig*flatstd if self.verbose : print "Setting flat z1 : %f, nsig = %i" % (self.z1, nsig) if z2 == "flat" : # symmetric to z1 # 5 sigma clipping to get rid of cosmics : nearflatpixvals = statsel[np.logical_and(statsel > medianlevel - 5*firststd, statsel < medianlevel + 5*firststd)] flatlevel = np.median(nearflatpixvals) flatstd = np.std(nearflatpixvals) self.z2 = flatlevel + nsig*flatstd if self.verbose : print "Setting flat z2 : %f, nsig = %i" % (self.z2, nsig) def __str__(self): """ Returns a string with some info about the image. The details vary according to what you have done so far with the image. """ return_string = ["Numpy array shape : ", str(self.numpyarray.shape)] if self.xa != 0 or self.ya != 0 or self.xb != self.origwidth or self.yb != self.origheight: return_string.append("\nRegion : [%i:%i, %i:%i]" % (self.xa, self.xb, self.ya, self.yb)) return_string.extend([ "\nPixel type : %s" % str(self.numpyarray.dtype.name), "\nCutoffs : z1 = %f, z2=%f" % (self.z1,self.z2) ]) if self.pilimage != None: return_string.extend([ "\nPIL image mode : %s" % str(self.pilimage.mode), "\nPIL image shape : (%i, %i)" % (self.pilimage.size[0], self.pilimage.size[1]) ]) return ''.join(return_string) def crop(self, xa, xb, ya, yb): """ Crops the image. Two points : - We use numpy conventions xa = 200 and xb = 400 will give you a width of 200 pixels ! - We crop relative to the current array (i.e. not necessarily to the original array !) This means you can crop several times in a row with xa = 10, it will each time remove 10 pixels in x ! But we update the crop region specifications, so that the object remembers how it was cut. Please give positive integers in compatible ranges, no checks are made. """ if self.pilimage != None: raise RuntimeError, "Cannot crop anymore, PIL image already exists !" if self.verbose: print "Cropping : [%i:%i, %i:%i]" % (xa, xb, ya, yb) self.numpyarray = self.numpyarray[xa:xb, ya:yb] self.xa += xa self.ya += ya self.xb = self.xa + (xb - xa) self.yb = self.ya + (yb - ya) #if self.verbose: # print "Region is now : [%i:%i, %i:%i]" % (self.xa, self.xb, self.ya, self.yb) def irafcrop(self, irafcropstring): """ This is a wrapper around crop(), similar to iraf imcopy, using iraf conventions (100:199 will be 100 pixels, not 99). """ irafcropstring = irafcropstring[1:-1] # removing the [ ] ranges = irafcropstring.split(",") xr = ranges[0].split(":") yr = ranges[1].split(":") xmin = int(xr[0]) xmax = int(xr[1])+1 ymin = int(yr[0]) ymax = int(yr[1])+1 self.crop(xmin, xmax, ymin, ymax) def rebin(self, factor): """ I robustly rebin your image by a given factor. You simply specify a factor, and I will eventually take care of a crop to bring the image to interger-multiple-of-your-factor dimensions. Note that if you crop your image before, you must directly crop to compatible dimensions ! We update the binfactor, this allows you to draw on the image later, still using the orignial pixel coordinates. Here we work on the numpy array. """ if self.pilimage != None: raise RuntimeError, "Cannot rebin anymore, PIL image already exists !" if type(factor) != type(0): raise RuntimeError, "Rebin factor must be an integer !" if factor < 1: return origshape = np.asarray(self.numpyarray.shape) neededshape = origshape - (origshape % factor) if not (origshape == neededshape).all(): if self.verbose : print "Rebinning %ix%i : I have to crop from %s to %s" % (factor, factor, origshape, neededshape) self.crop(0, neededshape[0], 0, neededshape[1]) else: if self.verbose : print "Rebinning %ix%i : I do not need to crop" % (factor, factor) self.numpyarray = rebin(self.numpyarray, neededshape/factor) # we call the rebin function defined below # The integer division neededshape/factor is ok, we checked for this above. self.binfactor = int(self.binfactor * factor) def makepilimage(self, scale = "log", negative = False): """ Makes a PIL image out of the array, respecting the z1 and z2 cutoffs. By default we use a log scaling identical to iraf's, and produce an image of mode "L", i.e. grayscale. But some drawings or colourscales will change the mode to "RGB" later, if you choose your own colours. If you choose scale = "clog" or "clin", you get hue values (aka rainbow colours). """ if scale == "log" or scale == "lin": self.negative = negative numpyarrayshape = self.numpyarray.shape calcarray = self.numpyarray.copy() #calcarray.ravel() # does not change in place in fact ! calcarray = calcarray.clip(min = self.z1, max = self.z2) if scale == "log": calcarray = np.array(map(lambda x: loggray(x, self.z1, self.z2), calcarray)) else : calcarray = np.array(map(lambda x: lingray(x, self.z1, self.z2), calcarray)) calcarray.shape = numpyarrayshape bwarray = np.zeros(numpyarrayshape, dtype=np.uint8) calcarray.round(out=bwarray) if negative: if self.verbose: print "Using negative scale" bwarray = 255 - bwarray if self.verbose: print "PIL range : [%i, %i]" % (np.min(bwarray), np.max(bwarray)) # We flip it so that (0, 0) is back in the bottom left corner as in ds9 # We do this here, so that you can write on the image from left to right :-) self.pilimage = imop.flip(im.fromarray(bwarray.transpose())) if self.verbose: print "PIL image made with scale : %s" % scale return 0 if scale == "clog" or scale == "clin": """ rainbow ! Algorithm for HSV to RGB from http://www.cs.rit.edu/~ncs/color/t_convert.html, by <NAME> Same stuff then for f2n in C h is from 0 to 360 (hue) s from 0 to 1 (saturation) v from 0 to 1 (brightness) """ self.negative = False calcarray = self.numpyarray.transpose() if scale == "clin": calcarray = (calcarray.clip(min = self.z1, max = self.z2)-self.z1)/(self.z2 - self.z1) # 0 to 1 if scale == "clog": calcarray = 10.0 + 990.0 * (calcarray.clip(min = self.z1, max = self.z2)-self.z1)/(self.z2 - self.z1) # 10 to 1000 calcarray = (np.log10(calcarray)-1.0)*0.5 # 0 to 1 #calcarray = calcarray * 359.0 # This is now our "hue value", 0 to 360 calcarray = (1.0-calcarray) * 300.0 # I limit this to not go into red again # The order of colours is Violet < Blue < Green < Yellow < Red # We prepare the output arrays rcalcarray = np.ones(calcarray.shape) gcalcarray = np.ones(calcarray.shape) bcalcarray = np.ones(calcarray.shape) h = calcarray/60.0 # sector 0 to 5 i = np.floor(h).astype(np.int) v = 1.0 * np.ones(calcarray.shape) s = 1.0 * np.ones(calcarray.shape) f = h - i # factorial part of h, this is an array p = v * ( 1.0 - s ) q = v * ( 1.0 - s * f ) t = v * ( 1.0 - s * ( 1.0 - f ) ) # sector 0: indices = (i == 0) rcalcarray[indices] = 255.0 * v[indices] gcalcarray[indices] = 255.0 * t[indices] bcalcarray[indices] = 255.0 * p[indices] # sector 1: indices = (i == 1) rcalcarray[indices] = 255.0 * q[indices] gcalcarray[indices] = 255.0 * v[indices] bcalcarray[indices] = 255.0 * p[indices] # sector 2: indices = (i == 2) rcalcarray[indices] = 255.0 * p[indices] gcalcarray[indices] = 255.0 * v[indices] bcalcarray[indices] = 255.0 * t[indices] # sector 3: indices = (i == 3) rcalcarray[indices] = 255.0 * p[indices] gcalcarray[indices] = 255.0 * q[indices] bcalcarray[indices] = 255.0 * v[indices] # sector 4: indices = (i == 4) rcalcarray[indices] = 255.0 * t[indices] gcalcarray[indices] = 255.0 * p[indices] bcalcarray[indices] = 255.0 * v[indices] # sector 5: indices = (i == 5) rcalcarray[indices] = 255.0 * v[indices] gcalcarray[indices] = 255.0 * p[indices] bcalcarray[indices] = 255.0 * q[indices] rarray = np.zeros(calcarray.shape, dtype=np.uint8) garray = np.zeros(calcarray.shape, dtype=np.uint8) barray = np.zeros(calcarray.shape, dtype=np.uint8) rcalcarray.round(out=rarray) gcalcarray.round(out=garray) bcalcarray.round(out=barray) carray = np.dstack((rarray,garray,barray)) self.pilimage = imop.flip(im.fromarray(carray, "RGB")) if self.verbose: print "PIL image made with scale : %s" % scale return 0 raise RuntimeError, "I don't know your colourscale, choose lin log clin or clog !" def drawmask(self, maskarray, colour = 128): """ I draw a mask on the image. Give me a numpy "maskarray" of same size as mine, and I draw on the pilimage all pixels of the maskarray that are True in the maskcolour. By default, colour is gray, to avoid switching to RGB. But if you give for instance (255, 0, 0), I will do the switch. """ self.checkforpilimage() self.changecolourmode(colour) self.makedraw() # Checking size of maskarray : if maskarray.shape[0] != self.pilimage.size[0] or maskarray.shape[1] != self.pilimage.size[1]: raise RuntimeError, "Mask and image must have the same size !" # We make an "L" mode image out of the mask : tmparray = np.zeros(maskarray.shape, dtype=np.uint8) tmparray[maskarray] = 255 maskpil = imop.flip(im.fromarray(tmparray.transpose())) # We make a plain colour image : if type(colour) == type(0) : plainpil = im.new("L", self.pilimage.size, colour) else : plainpil = im.new("RGB", self.pilimage.size, colour) # We switch self to RGB if needed : self.changecolourmode(colour) # And now use the function composite to "blend" our image with the plain colour image : self.pilimage = im.composite(plainpil, self.pilimage, maskpil) # As we have changed the image object, we have to rebuild the draw object : self.draw = None def showcutoffs(self, redblue = False): """ We use drawmask to visualize pixels above and below the z cutoffs. By default this is done in black (above) and white (below) (and adapts to negative images). But if you choose redblue = True, I use red for above z2 and blue for below z1. """ highmask = self.numpyarray > self.z2 lowmask = self.numpyarray < self.z1 if redblue == False : if self.negative : self.drawmask(highmask, colour = 255) self.drawmask(lowmask, colour = 0) else : self.drawmask(highmask, colour = 0) self.drawmask(lowmask, colour = 255) else : self.drawmask(highmask, colour = (255, 0, 0)) self.drawmask(lowmask, colour = (0, 0, 255)) def checkforpilimage(self): """Auxiliary method to check if the PIL image was already made.""" if self.pilimage == None: raise RuntimeError, "No PIL image : call makepilimage first !" def makedraw(self): """Auxiliary method to make a draw object if not yet done. This is also called by changecolourmode, when we go from L to RGB, to get a new draw object. """ if self.draw == None: self.draw = imdw.Draw(self.pilimage) def defaultcolour(self, colour): """ Auxiliary method to choose a default colour. Give me a user provided colour : if it is None, I change it to the default colour, respecting negative. Plus, if the image is in RGB mode and you give me 128 for a gray, I translate this to the expected (128, 128, 128) ... """ if colour == None: if self.negative == True: if self.pilimage.mode == "L" : return 0 else : return (0, 0, 0) else : if self.pilimage.mode == "L" : return 255 else : return (255, 255, 255) else : if self.pilimage.mode == "RGB" and type(colour) == type(0): return (colour, colour, colour) else : return colour def loadtitlefont(self): """Auxiliary method to load font if not yet done.""" if self.titlefont == None: # print 'the bloody fonts dir is????', fontsdir # print 'pero esto que hace??', os.path.join(fontsdir, "courR18.pil") # /home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts/f2n_fonts/courR18.pil # /home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts self.titlefont = imft.load_path(os.path.join(fontsdir, "courR18.pil")) def loadinfofont(self): """Auxiliary method to load font if not yet done.""" if self.infofont == None: self.infofont = imft.load_path(os.path.join(fontsdir, "courR10.pil")) def loadlabelfont(self): """Auxiliary method to load font if not yet done.""" if self.labelfont == None: self.labelfont = imft.load_path(os.path.join(fontsdir, "courR10.pil")) def changecolourmode(self, newcolour): """Auxiliary method to change the colour mode. Give me a colour (either an int, or a 3-tuple, values 0 to 255) and I decide if the image mode has to be switched from "L" to "RGB". """ if type(newcolour) != type(0) and self.pilimage.mode != "RGB": if self.verbose : print "Switching to RGB !" self.pilimage = self.pilimage.convert("RGB") self.draw = None # important, we have to bebuild the draw object. self.makedraw() def upsample(self, factor): """ The inverse operation of rebin, applied on the PIL image. Do this before writing text or drawing on the image ! The coordinates will be automatically converted for you """ self.checkforpilimage() if type(factor) != type(0): raise RuntimeError, "Upsample factor must be an integer !" if self.verbose: print "Upsampling by a factor of %i" % factor self.pilimage = self.pilimage.resize((self.pilimage.size[0] * factor, self.pilimage.size[1] * factor)) self.upsamplefactor = factor self.draw = None def pilcoords(self, (x,y)): """ Converts the coordinates (x,y) of the original array or FITS file to the current coordinates of the PIL image, respecting cropping, rebinning, and upsampling. This is only used once the PIL image is available, for drawing. Note that we also have to take care about the different origin conventions here ! For PIL, (0,0) is top left, so the y axis needs to be inverted. """ pilx = int((x - 1 - self.xa) * float(self.upsamplefactor) / float(self.binfactor)) pily = int((self.yb - y) * float(self.upsamplefactor) / float(self.binfactor)) return (pilx, pily) def pilscale(self, r): """ Converts a "scale" (like an aperture radius) of the original array or FITS file to the current PIL coordinates. """ return r * float(self.upsamplefactor) / float(self.binfactor) def drawpoint(self, x, y, colour = None): """ Most elementary drawing, single pixel, used mainly for testing purposes. Coordinates are those of your initial image ! """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() (pilx, pily) = self.pilcoords((x,y)) self.draw.point((pilx, pily), fill = colour) def drawcircle(self, x, y, r = 10, colour = None, label = None): """ Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image ! You give these x and y in the usual ds9 pixels, (0,0) is bottom left. I will convert this into the right PIL coordiates. """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() (pilx, pily) = self.pilcoords((x,y)) pilr = self.pilscale(r) self.draw.ellipse([(pilx-pilr+1, pily-pilr+1), (pilx+pilr+1, pily+pilr+1)], outline = colour) if label != None: # The we write it : self.loadlabelfont() textwidth = self.draw.textsize(label, font = self.labelfont)[0] self.draw.text((pilx - float(textwidth)/2.0 + 2, pily + pilr + 4), label, fill = colour, font = self.labelfont) def drawrectangle(self, xa, xb, ya, yb, colour=None, label = None): """ Draws a 1-pixel wide frame AROUND the region you specify. Same convention as for crop(). """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() (pilxa, pilya) = self.pilcoords((xa,ya)) (pilxb, pilyb) = self.pilcoords((xb,yb)) self.draw.rectangle([(pilxa, pilyb-1), (pilxb+1, pilya)], outline = colour) if label != None: # The we write it : self.loadlabelfont() textwidth = self.draw.textsize(label, font = self.labelfont)[0] self.draw.text(((pilxa + pilxb)/2.0 - float(textwidth)/2.0 + 1, pilya + 2), label, fill = colour, font = self.labelfont) # Replaced by the label options above : # # def writelabel(self, x, y, string, r = 10, colour = None): # """ # Made to put a label below of the circle. We use the radius to adapt the distance. # (So the coordinates (x,y) are those of the circle center...) # If you do not care about circles, put r = 0 and I will center the text at your coordinates. # """ # # self.checkforpilimage() # colour = self.defaultcolour(colour) # self.changecolourmode(colour) # self.makedraw() # # # # Similar to drawcircle, but we shift the label a bit above and right... # (pilx, pily) = self.pilcoords((x,y)) # pilr = self.pilscale(r) # # self.loadlabelfont() # textwidth = self.draw.textsize(string, font = self.labelfont)[0] # self.draw.text((pilx - float(textwidth)/2.0 + 1, pily + pilr + 1), string, fill = colour, font = self.labelfont) # def writetitle(self, titlestring, colour = None): """ We write a title, centered below the image. """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() self.loadtitlefont() imgwidth = self.pilimage.size[0] imgheight = self.pilimage.size[1] textwidth = self.draw.textsize(titlestring, font = self.titlefont)[0] textxpos = imgwidth/2.0 - textwidth/2.0 textypos = imgheight - 30 self.draw.text((textxpos, textypos), titlestring, fill = colour, font = self.titlefont) if self.verbose : print "I've written a title on the image." def writeinfo(self, linelist, colour = None): """ We add a longer chunk of text on the upper left corner of the image. Provide linelist, a list of strings that will be written one below the other. """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() self.loadinfofont() for i, line in enumerate(linelist): topspacing = 5 + (12 + 5)*i self.draw.text((10, topspacing), line, fill = colour, font = self.infofont) if self.verbose : print "I've written some info on the image." def drawstarslist(self, dictlist, r = 10, colour = None): """ Calls drawcircle and writelable for an list of stars. Provide a list of dictionnaries, where each dictionnary contains "name", "x", and "y". """ self.checkforpilimage() colour = self.defaultcolour(colour) self.changecolourmode(colour) self.makedraw() for star in dictlist: self.drawcircle(star["x"], star["y"], r = r, colour = colour, label = star["name"]) #self.writelabel(star["x"], star["y"], star["name"], r = r, colour = colour) if self.verbose : print "I've drawn %i stars." % len(dictlist) def drawstarsfile(self, filename, r = 10, colour = None): """ Same as drawstarlist but we read the stars from a file. Here we read a text file of hand picked stars. Same format as for cosmouline, that is : # comment starA 23.4 45.6 [other stuff...] Then we pass this to drawstarlist, """ if not os.path.isfile(filename): print "File does not exist :" print filename print "Line format to write : name x y [other stuff ...]" raise RuntimeError, "Cannot read star catalog." catfile = open(filename, "r") lines = catfile.readlines() catfile.close dictlist=[] # We will append dicts here. for i, line in enumerate(lines): if line[0] == '#' or len(line) < 4: continue elements = line.split() nbelements = len(elements) if nbelements < 3: print "Format error on line", i+1, "of :" print filename print "We want : name x y [other stuff ...]" raise RuntimeError, "Cannot read star catalog." name = elements[0] x = float(elements[1]) y = float(elements[2]) dictlist.append({"name":name, "x":x, "y":y}) if self.verbose : print "I've read %i stars from :" print os.path.split(filename)[1] self.drawstarslist(dictlist, r = r, colour = colour) def tonet(self, outfile): """ Writes the PIL image into a png. We do not want to flip the image at this stage, as you might have written on it ! """ self.checkforpilimage() if self.verbose : print "Writing image to %s...\n%i x %i pixels, mode %s" % (outfile, self.pilimage.size[0], self.pilimage.size[1], self.pilimage.mode) self.pilimage.save(outfile, "PNG") def lingray(x, a, b): """Auxiliary function that specifies the linear gray scale. a and b are the cutoffs.""" return 255 * (x-float(a))/(b-a) def loggray(x, a, b): """Auxiliary function that specifies the logarithmic gray scale. a and b are the cutoffs.""" linval = 10.0 + 990.0 * (x-float(a))/(b-a) return (np.log10(linval)-1.0)*0.5 * 255.0 def fromfits(infile, hdu = 0, verbose = True): """ Factory function that reads a FITS file and returns a f2nimage object. Use hdu to specify which HDU you want (primary = 0) """ pixelarray, hdr = ft.getdata(infile, hdu, header=True) pixelarray = np.asarray(pixelarray).transpose() #print pixelarray pixelarrayshape = pixelarray.shape if verbose : print "Input shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1]) print "Input file BITPIX : %s" % (hdr["BITPIX"]) pixelarrayshape = np.asarray(pixelarrayshape) if verbose : print "Internal array type :", pixelarray.dtype.name return f2nimage(pixelarray, verbose = verbose) def rebin(a, newshape): """ Auxiliary function to rebin ndarray data. Source : http://www.scipy.org/Cookbook/Rebinning example usage: >>> a=rand(6,4); b=rebin(a,(3,2)) """ shape = a.shape lenShape = len(shape) factor = np.asarray(shape)/np.asarray(newshape) #print factor evList = ['a.reshape('] + \ ['newshape[%d],factor[%d],'%(i,i) for i in xrange(lenShape)] + \ [')'] + ['.sum(%d)'%(i+1) for i in xrange(lenShape)] + \ ['/factor[%d]'%i for i in xrange(lenShape)] return eval(''.join(evList)) def compose(f2nimages, outfile): """ Takes f2nimages and writes them into one single png file, side by side. f2nimages is a list of horizontal lines, where each line is a list of f2nimages. For instance : [ [image1, image2], [image3, image4] ] The sizes of these images have to "match", so that the final result is rectangular. This function is verbose if any of the images is verbose. """ # We start by doing some checks, and try to print out helpfull error messages. verbosity = [] colourmodes = [] for i, line in enumerate(f2nimages): for j, img in enumerate(line): if img.verbose: print "Checking line %i, image %i (verbose)..." % (i+1, j+1) img.checkforpilimage() verbosity.append(img.verbose) colourmodes.append(img.pilimage.mode) verbose = np.any(np.array(verbosity)) # So we set the verbosity used in this function to true if any of the images is verbose. colours = list(set(colourmodes)) # We check if the widths are compatible : widths = [np.sum(np.array([img.pilimage.size[0] for img in line])) for line in f2nimages] if len(set(widths)) != 1 : print "Total widths of the lines :" print widths raise RuntimeError, "The total widths of your lines are not compatible !" totwidth = widths[0] # Similar for the heights : for i, line in enumerate(f2nimages): heights = [img.pilimage.size[1] for img in line] if len(set(heights)) != 1 : print "Heights of the images in line %i :" % (i + 1) print heights raise RuntimeError, "Heights of the images in line %i are not compatible." % (i + 1) totheight = np.sum(np.array([line[0].pilimage.size[1] for line in f2nimages])) # Ok, now it should be safe to go for the composition : if verbose: print "Composition size : %i x %i" % (totwidth, totheight) if verbose: print "Colour modes of input : %s" % colours if len(colours) == 1 and colours[0] == "L" : if verbose : print "Builing graylevel composition" compoimg = im.new("L", (totwidth, totheight), 128) else: if verbose : print "Building RGB composition" compoimg = im.new("RGB", (totwidth, totheight), (255, 0, 0)) y = 0 for line in f2nimages: x = 0 for img in line: box = (x, y, x+img.pilimage.size[0], y+img.pilimage.size[1]) #print box compoimg.paste(img.pilimage, box) x += img.pilimage.size[0] y += img.pilimage.size[1] if verbose: print "Writing compositions to %s...\n%i x %i pixels, mode %s" % (outfile, compoimg.size[0], compoimg.size[1], compoimg.mode) compoimg.save(outfile, "PNG") def isnumeric(value): """ "0.2355" would return True. A little auxiliary function for command line parsing. """ return str(value).replace(".", "").replace("-", "").isdigit() #if __name__ == "__main__": # """When the script is called and not imported, we simply pass the command line arguments to the fitstopng function.""" # # #from optparse import OptionParser # #parser = OptionParser() # #(options, args) = parser.parse_args() # #print args # # usagemessage = """ #usage : #python f2n.py in.fits out.png rebinning # #where : #- rebinning : 0 to not rebin, 2 to rebin 2x2, 3 to rebin 3x3 ... # #""" # # if len(sys.argv) < 4: # print usagemessage # sys.exit() # # #for arg in sys.argv: # # print arg # # fitstopng(sys.argv[1], sys.argv[2], int(sys.argv[3])) # if __name__ == "__main__": """ We read command-line-arguments when the script is called and not imported. With this we can perform some standart operations, immitating the old f2n in C. There is only support for the most basic stuff in this mode. Yes, I know that I should use getopt instead of argv ... """ args = sys.argv[1:] if len(args) != 4 and len(args) != 6: print """ usage : python f2n.py in.fits 1 log out.png [auto auto] The 1 is the binning factor Put a negative value and you get upsampled Then, log can be one of log, lin, clog or clin You can optionally add z1 and z2 cutoffs : auto, ex, flat, or 12.3 Enjoy ! """ sys.exit(1) else: arg_input = str(args[0]) arg_rebin = int(args[1]) arg_upsample = 1 if arg_rebin < 0: arg_upsample = - arg_rebin arg_rebin = 1 arg_scale = str(args[2]) arg_output = str(args[3]) if len(args) == 6: arg_z1 = args[4] arg_z2 = args[5] if isnumeric(arg_z1): arg_z1 = float(arg_z1) if isnumeric(arg_z2): arg_z2 = float(arg_z2) else: arg_z1 = "auto" arg_z2 = "auto" myimage = fromfits(arg_input) myimage.setzscale(arg_z1, arg_z2) myimage.rebin(arg_rebin) myimage.makepilimage(arg_scale) myimage.upsample(arg_upsample) myimage.tonet(arg_output)
en
0.791265
Created on Jun 15, 2015 @author: vital #! /usr/bin/env python f2n.py, the successor of f2n ! ============================== <NAME>, December 2009 U{http://obswww.unige.ch/~tewes/} This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. U{http://www.gnu.org/licenses/gpl.html} About ----- f2n.py is a tiny python module to make a well-scaled PNG file out of a FITS image. It's mainly a wrapper around pyfits + PIL. Aside of these two, we use only numpy. PIL : U{http://www.pythonware.com/products/pil/} pyfits : U{http://www.stsci.edu/resources/software_hardware/pyfits} Usage ----- You can use this python script both as a module or as an executable with command line options. See the website and the examples provided in the tarball ! To learn about the methods of the f2nimage class, click on I{f2n.f2nimage} in the left menu. Features -------- f2n.py let's you crop the input image, rebin it, choose cutoffs and log or lin scales, draw masks, "upsample" the pixels without interpolation, circle and annotate objects, write titles or longer strings, and even compose several of such images side by side into one single png file. For the location of pixels for drawing + labels, we work in "image" pixels, like ds9 and sextractor. This is true even when you have choosen to crop/rebin/upsample the image : you still specify all coordinates as pixels of the original input image ! By default we produce graylevel 8-bit pngs, for minimum file size. But you are free to use colours as well (thus writing 24 bit pngs). Order of operations that should be respected for maximum performance (and to avoid "features" ... ) : - fromfits (or call to constructor) - crop - setzscale (auto, ex, flat, or your own choice) - rebin - makepilimage (lin, log, clin, or clog) (the c stands for colours... "rainbow") - drawmask, showcutoffs - upsample - drawcircle, drawrectangle, writelabel, writeinfo, writetitle, drawstarsfile, drawstarslist - tonet (or compose) Ideas for future versions ------------------------- - variant of rebin() that rebins/upscales to approach a given size, like maxsize(500). # import Image as im #This importing format is deprecated. We have updated it # import ImageOps as imop # import ImageDraw as imdw # import ImageFont as imft # - - - - - - - - - Where are my fonts ? - - - - - - - - - - - - # To use the fonts, put the directory containing them (whose name # can be changed below) somewhere in your python path, typically # aside of this f2n.py file ! # To learn about your python path : # >>> import sys # >>> print sys.path #MODIFIED TO BE ADJUSTED ACCORDING TO THE LOCATION OF THE FONTS... WHAT A BLOODY MESS # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Give me a numpyarray, or give me a shape (in this case I build my own array and fill it with the value fill). We will call the first coordinate "x", the second "y". The origin (0,0) is to be seen as the pixel in the lower left corner. When converting the numpy array to a png file, we will take care of the orientation in that way. # Should I print infos about what I do ? # (int) We will keep this up to date, # to calculate correct drawing positions # idem # For now, no PIL image. Some manipulations are to be done before. # Will be set to true if choosen in makepilimage. Changes default colours. # The draw object is created when needed, see makedraw() # The fonts are loaded when needed, see loadtitlefont() # Now the numpy array to hold the actual data. # We keep trace of any crops through these : #self.cropwascalled = False # They will be updated by any crop method, so that we can always convert between # coordinates in the orinigal numpy array or fits image, and coordinates in the # rebinned cutout etc. # Copy method : does not work / not required. You can apply .copy() anyway, but only before # the PILimage is created. # # def copy(self): # """Returns a "deep copy" of the f2nimage object.""" # # return pythoncopy.deepcopy(self) We set z1 and z2, according to different algorithms or arguments. For both z1 and z2, give either : - "auto" (default automatic, different between z1 and z2) - "ex" (extrema) - "flat" ("sigma-cuts" around median value, well-suited for flatfields) - numeric value like 1230.34 nsig is the number of sigmas to be rejected (used by auto z1 + both flats) samplesizelimit is the maximum number of pixels to compute statistics on. If your image is larger then samplesizelimit, I will use only samplesizelimit pixels of it. If your image is 3 times border in width and height, I will skip border pixels around the image before doing calculations. This is made to get rid of the overscan and prescan etc. So you can basically leave this at 300, it will only affect images wider then 900 pixels. (300 happens to be a safe value for many telescopes.) You can put border = 0 to deactivate this feature. If you give nothing, the cutoff will not be changed. You should set the z scale directly after cropping the image. # Starting with the simple possibilities : # Now it gets a little more sophisticated. # To speed up, we do not want to do statistics on the full image if it is large. # So we prepare a small random sample of pixels. # We flatten the 2D array #selectionindices = np.random.random_integers(low = 0, high = calcarray.size - 1, size=samplesizelimit) #nbrofbins = 10 + int(np.log10(calcarray.size)*10.0) #print "Building histogram with %i bins" % nbrofbins #nbrofbins = 100 #hist = np.histogram(statsel, bins=nbrofbins, range=(self.z1, self.z2), normed=False, weights=None, new=True) # 2 sigma clipping (quick and dirty star removal) : # Here we want to reject a percentage of high values... # 5 sigma clipping to get rid of cosmics : # symmetric to z1 # 5 sigma clipping to get rid of cosmics : Returns a string with some info about the image. The details vary according to what you have done so far with the image. Crops the image. Two points : - We use numpy conventions xa = 200 and xb = 400 will give you a width of 200 pixels ! - We crop relative to the current array (i.e. not necessarily to the original array !) This means you can crop several times in a row with xa = 10, it will each time remove 10 pixels in x ! But we update the crop region specifications, so that the object remembers how it was cut. Please give positive integers in compatible ranges, no checks are made. #if self.verbose: # print "Region is now : [%i:%i, %i:%i]" % (self.xa, self.xb, self.ya, self.yb) This is a wrapper around crop(), similar to iraf imcopy, using iraf conventions (100:199 will be 100 pixels, not 99). # removing the [ ] I robustly rebin your image by a given factor. You simply specify a factor, and I will eventually take care of a crop to bring the image to interger-multiple-of-your-factor dimensions. Note that if you crop your image before, you must directly crop to compatible dimensions ! We update the binfactor, this allows you to draw on the image later, still using the orignial pixel coordinates. Here we work on the numpy array. # we call the rebin function defined below # The integer division neededshape/factor is ok, we checked for this above. Makes a PIL image out of the array, respecting the z1 and z2 cutoffs. By default we use a log scaling identical to iraf's, and produce an image of mode "L", i.e. grayscale. But some drawings or colourscales will change the mode to "RGB" later, if you choose your own colours. If you choose scale = "clog" or "clin", you get hue values (aka rainbow colours). #calcarray.ravel() # does not change in place in fact ! # We flip it so that (0, 0) is back in the bottom left corner as in ds9 # We do this here, so that you can write on the image from left to right :-) rainbow ! Algorithm for HSV to RGB from http://www.cs.rit.edu/~ncs/color/t_convert.html, by <NAME> Same stuff then for f2n in C h is from 0 to 360 (hue) s from 0 to 1 (saturation) v from 0 to 1 (brightness) # 0 to 1 # 10 to 1000 # 0 to 1 #calcarray = calcarray * 359.0 # This is now our "hue value", 0 to 360 # I limit this to not go into red again # The order of colours is Violet < Blue < Green < Yellow < Red # We prepare the output arrays # sector 0 to 5 # factorial part of h, this is an array # sector 0: # sector 1: # sector 2: # sector 3: # sector 4: # sector 5: I draw a mask on the image. Give me a numpy "maskarray" of same size as mine, and I draw on the pilimage all pixels of the maskarray that are True in the maskcolour. By default, colour is gray, to avoid switching to RGB. But if you give for instance (255, 0, 0), I will do the switch. # Checking size of maskarray : # We make an "L" mode image out of the mask : # We make a plain colour image : # We switch self to RGB if needed : # And now use the function composite to "blend" our image with the plain colour image : # As we have changed the image object, we have to rebuild the draw object : We use drawmask to visualize pixels above and below the z cutoffs. By default this is done in black (above) and white (below) (and adapts to negative images). But if you choose redblue = True, I use red for above z2 and blue for below z1. Auxiliary method to check if the PIL image was already made. Auxiliary method to make a draw object if not yet done. This is also called by changecolourmode, when we go from L to RGB, to get a new draw object. Auxiliary method to choose a default colour. Give me a user provided colour : if it is None, I change it to the default colour, respecting negative. Plus, if the image is in RGB mode and you give me 128 for a gray, I translate this to the expected (128, 128, 128) ... Auxiliary method to load font if not yet done. # print 'the bloody fonts dir is????', fontsdir # print 'pero esto que hace??', os.path.join(fontsdir, "courR18.pil") # /home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts/f2n_fonts/courR18.pil # /home/vital/Workspace/pyResources/Scientific_Lib/f2n_fonts Auxiliary method to load font if not yet done. Auxiliary method to load font if not yet done. Auxiliary method to change the colour mode. Give me a colour (either an int, or a 3-tuple, values 0 to 255) and I decide if the image mode has to be switched from "L" to "RGB". # important, we have to bebuild the draw object. The inverse operation of rebin, applied on the PIL image. Do this before writing text or drawing on the image ! The coordinates will be automatically converted for you Converts the coordinates (x,y) of the original array or FITS file to the current coordinates of the PIL image, respecting cropping, rebinning, and upsampling. This is only used once the PIL image is available, for drawing. Note that we also have to take care about the different origin conventions here ! For PIL, (0,0) is top left, so the y axis needs to be inverted. Converts a "scale" (like an aperture radius) of the original array or FITS file to the current PIL coordinates. Most elementary drawing, single pixel, used mainly for testing purposes. Coordinates are those of your initial image ! Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image ! You give these x and y in the usual ds9 pixels, (0,0) is bottom left. I will convert this into the right PIL coordiates. # The we write it : Draws a 1-pixel wide frame AROUND the region you specify. Same convention as for crop(). # The we write it : # Replaced by the label options above : # # def writelabel(self, x, y, string, r = 10, colour = None): # """ # Made to put a label below of the circle. We use the radius to adapt the distance. # (So the coordinates (x,y) are those of the circle center...) # If you do not care about circles, put r = 0 and I will center the text at your coordinates. # """ # # self.checkforpilimage() # colour = self.defaultcolour(colour) # self.changecolourmode(colour) # self.makedraw() # # # # Similar to drawcircle, but we shift the label a bit above and right... # (pilx, pily) = self.pilcoords((x,y)) # pilr = self.pilscale(r) # # self.loadlabelfont() # textwidth = self.draw.textsize(string, font = self.labelfont)[0] # self.draw.text((pilx - float(textwidth)/2.0 + 1, pily + pilr + 1), string, fill = colour, font = self.labelfont) # We write a title, centered below the image. We add a longer chunk of text on the upper left corner of the image. Provide linelist, a list of strings that will be written one below the other. Calls drawcircle and writelable for an list of stars. Provide a list of dictionnaries, where each dictionnary contains "name", "x", and "y". #self.writelabel(star["x"], star["y"], star["name"], r = r, colour = colour) Same as drawstarlist but we read the stars from a file. Here we read a text file of hand picked stars. Same format as for cosmouline, that is : # comment starA 23.4 45.6 [other stuff...] Then we pass this to drawstarlist, # We will append dicts here. Writes the PIL image into a png. We do not want to flip the image at this stage, as you might have written on it ! Auxiliary function that specifies the linear gray scale. a and b are the cutoffs. Auxiliary function that specifies the logarithmic gray scale. a and b are the cutoffs. Factory function that reads a FITS file and returns a f2nimage object. Use hdu to specify which HDU you want (primary = 0) #print pixelarray Auxiliary function to rebin ndarray data. Source : http://www.scipy.org/Cookbook/Rebinning example usage: >>> a=rand(6,4); b=rebin(a,(3,2)) #print factor Takes f2nimages and writes them into one single png file, side by side. f2nimages is a list of horizontal lines, where each line is a list of f2nimages. For instance : [ [image1, image2], [image3, image4] ] The sizes of these images have to "match", so that the final result is rectangular. This function is verbose if any of the images is verbose. # We start by doing some checks, and try to print out helpfull error messages. # So we set the verbosity used in this function to true if any of the images is verbose. # We check if the widths are compatible : # Similar for the heights : # Ok, now it should be safe to go for the composition : #print box "0.2355" would return True. A little auxiliary function for command line parsing. #if __name__ == "__main__": # """When the script is called and not imported, we simply pass the command line arguments to the fitstopng function.""" # # #from optparse import OptionParser # #parser = OptionParser() # #(options, args) = parser.parse_args() # #print args # # usagemessage = """ #usage : #python f2n.py in.fits out.png rebinning # #where : #- rebinning : 0 to not rebin, 2 to rebin 2x2, 3 to rebin 3x3 ... # #""" # # if len(sys.argv) < 4: # print usagemessage # sys.exit() # # #for arg in sys.argv: # # print arg # # fitstopng(sys.argv[1], sys.argv[2], int(sys.argv[3])) # We read command-line-arguments when the script is called and not imported. With this we can perform some standart operations, immitating the old f2n in C. There is only support for the most basic stuff in this mode. Yes, I know that I should use getopt instead of argv ... usage : python f2n.py in.fits 1 log out.png [auto auto] The 1 is the binning factor Put a negative value and you get upsampled Then, log can be one of log, lin, clog or clin You can optionally add z1 and z2 cutoffs : auto, ex, flat, or 12.3 Enjoy !
2.512638
3
setup.py
mon4ter/aiostaticmap
0
6617627
from re import search from setuptools import setup with open('src/aiostaticmap/__init__.py') as f: version = str(search(r"__version__ = '(.*)'", f.read()).group(1)) with open('README.md') as f: long_description = f.read() setup( name='aiostaticmap', version=version, packages=['aiostaticmap'], package_dir={'': 'src'}, install_requires=[ 'Pillow >= 7.0, < 8.0', 'aiohttp >= 3.0, < 4.0', ], # setup_requires=['pytest-runner'], # tests_require=[ # 'pytest', # 'pytest-aiohttp', # ], url='https://github.com/mon4ter/aiostaticmap', license='Apache License 2.0', author='<NAME>, <NAME>', author_email='<EMAIL>, <EMAIL>', description='A small, python-based library for creating map images with lines and markers.', long_description=long_description, long_description_content_type='text/markdown', classifiers=[ 'Development Status :: 4 - Beta', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', ], keywords='static map image osm aio async asyncio', )
from re import search from setuptools import setup with open('src/aiostaticmap/__init__.py') as f: version = str(search(r"__version__ = '(.*)'", f.read()).group(1)) with open('README.md') as f: long_description = f.read() setup( name='aiostaticmap', version=version, packages=['aiostaticmap'], package_dir={'': 'src'}, install_requires=[ 'Pillow >= 7.0, < 8.0', 'aiohttp >= 3.0, < 4.0', ], # setup_requires=['pytest-runner'], # tests_require=[ # 'pytest', # 'pytest-aiohttp', # ], url='https://github.com/mon4ter/aiostaticmap', license='Apache License 2.0', author='<NAME>, <NAME>', author_email='<EMAIL>, <EMAIL>', description='A small, python-based library for creating map images with lines and markers.', long_description=long_description, long_description_content_type='text/markdown', classifiers=[ 'Development Status :: 4 - Beta', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', ], keywords='static map image osm aio async asyncio', )
en
0.237237
# setup_requires=['pytest-runner'], # tests_require=[ # 'pytest', # 'pytest-aiohttp', # ],
1.46938
1
DatasetRefactorer/Example.py
ZeoZagart/TF_QA
1
6617628
<reponame>ZeoZagart/TF_QA import json from typing import List, Tuple from DatasetRefactorer.Constants import * class TrainExample : long_ans: str = None short_ans: List[str] = None yes_no_ans: int = 0 question_text: str = None is_ans_correct: bool = False def __init__(self, long_ans: str, short_ans: str, yes_no: int, question_text: str, is_correct: bool) : self.long_ans = long_ans self.short_ans = short_ans self.yes_no_ans = yes_no self.question_text = question_text self.is_ans_correct = is_correct def __repr__(self) : return json.dumps(self.to_dict()) def to_list(self) : return [self.question_text, self.long_ans, self.short_ans, self.yes_no_ans, self.is_ans_correct] def to_dict(self) : return {"question_text": self.question_text, "long_ans": self.long_ans, "short_ans": self.short_ans, "yes_no": self.yes_no_ans, "is_correct": self.is_ans_correct} class TestExample : long_ans: str = None question_id: str = None question_text: str = None def __init__(self, question_id: str, question_text: str, long_ans: str) : self.long_ans = long_ans self.question_id = question_id self.question_text = question_text def __init__(self, item_dict) : self.long_ans = item_dict["long_ans"] self.question_id = item_dict["question_id"] self.question_text = item_dict["question_text"] def to_dict(self) : return {"question_id": self.question_id, "question_text": self.question_text, "long_ans": self.long_ans} class ExampleCreator : def test_item_to_examples(data_item) -> List[TestExample]: test_set = [] question_id = data_item[EXAMPLE_ID] question = data_item[QUESTION_TEXT] document = data_item[DOCUMENT_TEXT].split() for idx, candidate in enumerate(data_item[LONG_ANSWER_CANDIDATES]) : if candidate[TOP_LEVEL] == False : continue long_ans = ExampleCreator.get_string_from_token_list( document[candidate[START_TOKEN]:candidate[END_TOKEN]]) test_set.append(TestExample(question_id, question, long_ans)) return test_set def train_item_to_examples(data_item) -> List[TrainExample]: train_set = [] question = data_item[QUESTION_TEXT] document = data_item[DOCUMENT_TEXT].split() long_ans_idx = ExampleCreator.get_outermost_long_ans_index(data_item[ANNOTATIONS][0],data_item[LONG_ANSWER_CANDIDATES]) for idx, candidate in enumerate(data_item[LONG_ANSWER_CANDIDATES]) : if candidate[TOP_LEVEL] == False : continue if idx == long_ans_idx : [long_ans, short_ans, yes_no_ans] = ExampleCreator.get_ans_from_annotation(data_item[ANNOTATIONS][0], document) train_set.append(TrainExample(long_ans, short_ans, yes_no_ans, question, True)) else : long_ans = ExampleCreator.get_string_from_token_list( document[candidate[START_TOKEN]:candidate[END_TOKEN]]) train_set.append(TrainExample(long_ans, None, 0, question, False)) return train_set def get_outermost_long_ans_index(annotations, long_ans_candidates) -> int: long_ans = annotations[LONG_ANSWER] if long_ans == None or len(long_ans) == 0 : return -1 candidate_idx = long_ans[CANDIDATE_IDX] while long_ans_candidates[candidate_idx][TOP_LEVEL] == False : candidate_idx -= 1 return candidate_idx def get_ans_from_annotation(annotations, document: str) -> List[str]: long_string = None short_string = None if len(annotations[LONG_ANSWER]) > 0 : long_start = annotations[LONG_ANSWER][START_TOKEN] long_end = annotations[LONG_ANSWER][END_TOKEN] long_string = ExampleCreator.get_string_from_token_list(document[long_start: long_end]) short_answers = [] for ans in annotations[SHORT_ANSWER] : short_start, short_end = ans[START_TOKEN], ans[END_TOKEN] if short_start < long_start or short_end > long_end : continue short_string = ExampleCreator.get_string_from_token_list(document[short_start: short_end], is_short_ans = True) short_answers.append(short_string) yes_no_ans = annotations[YES_NO].lower() if yes_no_ans == 'true' : yes_no_ans = 2 elif yes_no_ans == 'false' : yes_no_ans = 1 else : yes_no_ans = 0 return [long_string, short_answers, yes_no_ans] def get_string_from_token_list(tokens, is_short_ans: bool = False) -> str: if is_short_ans == True : token_join = ' '.join(tokens) else : token_join = ' '.join([token for token in tokens if token[0] != '<']) return token_join
import json from typing import List, Tuple from DatasetRefactorer.Constants import * class TrainExample : long_ans: str = None short_ans: List[str] = None yes_no_ans: int = 0 question_text: str = None is_ans_correct: bool = False def __init__(self, long_ans: str, short_ans: str, yes_no: int, question_text: str, is_correct: bool) : self.long_ans = long_ans self.short_ans = short_ans self.yes_no_ans = yes_no self.question_text = question_text self.is_ans_correct = is_correct def __repr__(self) : return json.dumps(self.to_dict()) def to_list(self) : return [self.question_text, self.long_ans, self.short_ans, self.yes_no_ans, self.is_ans_correct] def to_dict(self) : return {"question_text": self.question_text, "long_ans": self.long_ans, "short_ans": self.short_ans, "yes_no": self.yes_no_ans, "is_correct": self.is_ans_correct} class TestExample : long_ans: str = None question_id: str = None question_text: str = None def __init__(self, question_id: str, question_text: str, long_ans: str) : self.long_ans = long_ans self.question_id = question_id self.question_text = question_text def __init__(self, item_dict) : self.long_ans = item_dict["long_ans"] self.question_id = item_dict["question_id"] self.question_text = item_dict["question_text"] def to_dict(self) : return {"question_id": self.question_id, "question_text": self.question_text, "long_ans": self.long_ans} class ExampleCreator : def test_item_to_examples(data_item) -> List[TestExample]: test_set = [] question_id = data_item[EXAMPLE_ID] question = data_item[QUESTION_TEXT] document = data_item[DOCUMENT_TEXT].split() for idx, candidate in enumerate(data_item[LONG_ANSWER_CANDIDATES]) : if candidate[TOP_LEVEL] == False : continue long_ans = ExampleCreator.get_string_from_token_list( document[candidate[START_TOKEN]:candidate[END_TOKEN]]) test_set.append(TestExample(question_id, question, long_ans)) return test_set def train_item_to_examples(data_item) -> List[TrainExample]: train_set = [] question = data_item[QUESTION_TEXT] document = data_item[DOCUMENT_TEXT].split() long_ans_idx = ExampleCreator.get_outermost_long_ans_index(data_item[ANNOTATIONS][0],data_item[LONG_ANSWER_CANDIDATES]) for idx, candidate in enumerate(data_item[LONG_ANSWER_CANDIDATES]) : if candidate[TOP_LEVEL] == False : continue if idx == long_ans_idx : [long_ans, short_ans, yes_no_ans] = ExampleCreator.get_ans_from_annotation(data_item[ANNOTATIONS][0], document) train_set.append(TrainExample(long_ans, short_ans, yes_no_ans, question, True)) else : long_ans = ExampleCreator.get_string_from_token_list( document[candidate[START_TOKEN]:candidate[END_TOKEN]]) train_set.append(TrainExample(long_ans, None, 0, question, False)) return train_set def get_outermost_long_ans_index(annotations, long_ans_candidates) -> int: long_ans = annotations[LONG_ANSWER] if long_ans == None or len(long_ans) == 0 : return -1 candidate_idx = long_ans[CANDIDATE_IDX] while long_ans_candidates[candidate_idx][TOP_LEVEL] == False : candidate_idx -= 1 return candidate_idx def get_ans_from_annotation(annotations, document: str) -> List[str]: long_string = None short_string = None if len(annotations[LONG_ANSWER]) > 0 : long_start = annotations[LONG_ANSWER][START_TOKEN] long_end = annotations[LONG_ANSWER][END_TOKEN] long_string = ExampleCreator.get_string_from_token_list(document[long_start: long_end]) short_answers = [] for ans in annotations[SHORT_ANSWER] : short_start, short_end = ans[START_TOKEN], ans[END_TOKEN] if short_start < long_start or short_end > long_end : continue short_string = ExampleCreator.get_string_from_token_list(document[short_start: short_end], is_short_ans = True) short_answers.append(short_string) yes_no_ans = annotations[YES_NO].lower() if yes_no_ans == 'true' : yes_no_ans = 2 elif yes_no_ans == 'false' : yes_no_ans = 1 else : yes_no_ans = 0 return [long_string, short_answers, yes_no_ans] def get_string_from_token_list(tokens, is_short_ans: bool = False) -> str: if is_short_ans == True : token_join = ' '.join(tokens) else : token_join = ' '.join([token for token in tokens if token[0] != '<']) return token_join
none
1
2.839169
3
asposecellscloud/models/data_bar.py
aspose-cells-cloud/aspose-cells-cloud-python
3
6617629
# coding: utf-8 """ Copyright (c) 2021 Aspose.Cells Cloud Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE """ from pprint import pformat from six import iteritems import re class DataBar(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'direction': 'str', 'max_cfvo': 'ConditionalFormattingValue', 'color': 'Color', 'min_length': 'int', 'bar_fill_type': 'str', 'min_cfvo': 'ConditionalFormattingValue', 'axis_position': 'str', 'negative_bar_format': 'NegativeBarFormat', 'bar_border': 'DataBarBorder', 'axis_color': 'Color', 'max_length': 'int', 'show_value': 'bool' } attribute_map = { 'direction': 'Direction', 'max_cfvo': 'MaxCfvo', 'color': 'Color', 'min_length': 'MinLength', 'bar_fill_type': 'BarFillType', 'min_cfvo': 'MinCfvo', 'axis_position': 'AxisPosition', 'negative_bar_format': 'NegativeBarFormat', 'bar_border': 'BarBorder', 'axis_color': 'AxisColor', 'max_length': 'MaxLength', 'show_value': 'ShowValue' } @staticmethod def get_swagger_types(): return DataBar.swagger_types @staticmethod def get_attribute_map(): return DataBar.attribute_map def get_from_container(self, attr): if attr in self.container: return self.container[attr] return None def __init__(self, direction=None, max_cfvo=None, color=None, min_length=None, bar_fill_type=None, min_cfvo=None, axis_position=None, negative_bar_format=None, bar_border=None, axis_color=None, max_length=None, show_value=None, **kw): """ Associative dict for storing property values """ self.container = {} """ DataBar - a model defined in Swagger """ self.container['direction'] = None self.container['max_cfvo'] = None self.container['color'] = None self.container['min_length'] = None self.container['bar_fill_type'] = None self.container['min_cfvo'] = None self.container['axis_position'] = None self.container['negative_bar_format'] = None self.container['bar_border'] = None self.container['axis_color'] = None self.container['max_length'] = None self.container['show_value'] = None if direction is not None: self.direction = direction if max_cfvo is not None: self.max_cfvo = max_cfvo if color is not None: self.color = color if min_length is not None: self.min_length = min_length if bar_fill_type is not None: self.bar_fill_type = bar_fill_type if min_cfvo is not None: self.min_cfvo = min_cfvo if axis_position is not None: self.axis_position = axis_position if negative_bar_format is not None: self.negative_bar_format = negative_bar_format if bar_border is not None: self.bar_border = bar_border if axis_color is not None: self.axis_color = axis_color if max_length is not None: self.max_length = max_length if show_value is not None: self.show_value = show_value @property def direction(self): """ Gets the direction of this DataBar. Gets or sets the direction the databar is displayed. :return: The direction of this DataBar. :rtype: str """ return self.container['direction'] @direction.setter def direction(self, direction): """ Sets the direction of this DataBar. Gets or sets the direction the databar is displayed. :param direction: The direction of this DataBar. :type: str """ self.container['direction'] = direction @property def max_cfvo(self): """ Gets the max_cfvo of this DataBar. Get or set this DataBar's max value object. Cannot set null or CFValueObject with type FormatConditionValueType.Min to it. :return: The max_cfvo of this DataBar. :rtype: ConditionalFormattingValue """ return self.container['max_cfvo'] @max_cfvo.setter def max_cfvo(self, max_cfvo): """ Sets the max_cfvo of this DataBar. Get or set this DataBar's max value object. Cannot set null or CFValueObject with type FormatConditionValueType.Min to it. :param max_cfvo: The max_cfvo of this DataBar. :type: ConditionalFormattingValue """ self.container['max_cfvo'] = max_cfvo @property def color(self): """ Gets the color of this DataBar. Get or set this DataBar's Color. :return: The color of this DataBar. :rtype: Color """ return self.container['color'] @color.setter def color(self, color): """ Sets the color of this DataBar. Get or set this DataBar's Color. :param color: The color of this DataBar. :type: Color """ self.container['color'] = color @property def min_length(self): """ Gets the min_length of this DataBar. Represents the min length of data bar . :return: The min_length of this DataBar. :rtype: int """ return self.container['min_length'] @min_length.setter def min_length(self, min_length): """ Sets the min_length of this DataBar. Represents the min length of data bar . :param min_length: The min_length of this DataBar. :type: int """ self.container['min_length'] = min_length @property def bar_fill_type(self): """ Gets the bar_fill_type of this DataBar. Gets or sets how a data bar is filled with color. :return: The bar_fill_type of this DataBar. :rtype: str """ return self.container['bar_fill_type'] @bar_fill_type.setter def bar_fill_type(self, bar_fill_type): """ Sets the bar_fill_type of this DataBar. Gets or sets how a data bar is filled with color. :param bar_fill_type: The bar_fill_type of this DataBar. :type: str """ self.container['bar_fill_type'] = bar_fill_type @property def min_cfvo(self): """ Gets the min_cfvo of this DataBar. Get or set this DataBar's min value object. Cannot set null or CFValueObject with type FormatConditionValueType.Max to it. :return: The min_cfvo of this DataBar. :rtype: ConditionalFormattingValue """ return self.container['min_cfvo'] @min_cfvo.setter def min_cfvo(self, min_cfvo): """ Sets the min_cfvo of this DataBar. Get or set this DataBar's min value object. Cannot set null or CFValueObject with type FormatConditionValueType.Max to it. :param min_cfvo: The min_cfvo of this DataBar. :type: ConditionalFormattingValue """ self.container['min_cfvo'] = min_cfvo @property def axis_position(self): """ Gets the axis_position of this DataBar. Gets or sets the position of the axis of the data bars specified by a conditional formatting rule. :return: The axis_position of this DataBar. :rtype: str """ return self.container['axis_position'] @axis_position.setter def axis_position(self, axis_position): """ Sets the axis_position of this DataBar. Gets or sets the position of the axis of the data bars specified by a conditional formatting rule. :param axis_position: The axis_position of this DataBar. :type: str """ self.container['axis_position'] = axis_position @property def negative_bar_format(self): """ Gets the negative_bar_format of this DataBar. Gets the NegativeBarFormat object associated with a data bar conditional formatting rule. :return: The negative_bar_format of this DataBar. :rtype: NegativeBarFormat """ return self.container['negative_bar_format'] @negative_bar_format.setter def negative_bar_format(self, negative_bar_format): """ Sets the negative_bar_format of this DataBar. Gets the NegativeBarFormat object associated with a data bar conditional formatting rule. :param negative_bar_format: The negative_bar_format of this DataBar. :type: NegativeBarFormat """ self.container['negative_bar_format'] = negative_bar_format @property def bar_border(self): """ Gets the bar_border of this DataBar. Gets an object that specifies the border of a data bar. :return: The bar_border of this DataBar. :rtype: DataBarBorder """ return self.container['bar_border'] @bar_border.setter def bar_border(self, bar_border): """ Sets the bar_border of this DataBar. Gets an object that specifies the border of a data bar. :param bar_border: The bar_border of this DataBar. :type: DataBarBorder """ self.container['bar_border'] = bar_border @property def axis_color(self): """ Gets the axis_color of this DataBar. Gets the color of the axis for cells with conditional formatting as data bars. :return: The axis_color of this DataBar. :rtype: Color """ return self.container['axis_color'] @axis_color.setter def axis_color(self, axis_color): """ Sets the axis_color of this DataBar. Gets the color of the axis for cells with conditional formatting as data bars. :param axis_color: The axis_color of this DataBar. :type: Color """ self.container['axis_color'] = axis_color @property def max_length(self): """ Gets the max_length of this DataBar. Represents the max length of data bar . :return: The max_length of this DataBar. :rtype: int """ return self.container['max_length'] @max_length.setter def max_length(self, max_length): """ Sets the max_length of this DataBar. Represents the max length of data bar . :param max_length: The max_length of this DataBar. :type: int """ self.container['max_length'] = max_length @property def show_value(self): """ Gets the show_value of this DataBar. Get or set the flag indicating whether to show the values of the cells on which this data bar is applied. Default value is true. :return: The show_value of this DataBar. :rtype: bool """ return self.container['show_value'] @show_value.setter def show_value(self, show_value): """ Sets the show_value of this DataBar. Get or set the flag indicating whether to show the values of the cells on which this data bar is applied. Default value is true. :param show_value: The show_value of this DataBar. :type: bool """ self.container['show_value'] = show_value def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.get_swagger_types()): value = self.get_from_container(attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, DataBar): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
# coding: utf-8 """ Copyright (c) 2021 Aspose.Cells Cloud Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE """ from pprint import pformat from six import iteritems import re class DataBar(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'direction': 'str', 'max_cfvo': 'ConditionalFormattingValue', 'color': 'Color', 'min_length': 'int', 'bar_fill_type': 'str', 'min_cfvo': 'ConditionalFormattingValue', 'axis_position': 'str', 'negative_bar_format': 'NegativeBarFormat', 'bar_border': 'DataBarBorder', 'axis_color': 'Color', 'max_length': 'int', 'show_value': 'bool' } attribute_map = { 'direction': 'Direction', 'max_cfvo': 'MaxCfvo', 'color': 'Color', 'min_length': 'MinLength', 'bar_fill_type': 'BarFillType', 'min_cfvo': 'MinCfvo', 'axis_position': 'AxisPosition', 'negative_bar_format': 'NegativeBarFormat', 'bar_border': 'BarBorder', 'axis_color': 'AxisColor', 'max_length': 'MaxLength', 'show_value': 'ShowValue' } @staticmethod def get_swagger_types(): return DataBar.swagger_types @staticmethod def get_attribute_map(): return DataBar.attribute_map def get_from_container(self, attr): if attr in self.container: return self.container[attr] return None def __init__(self, direction=None, max_cfvo=None, color=None, min_length=None, bar_fill_type=None, min_cfvo=None, axis_position=None, negative_bar_format=None, bar_border=None, axis_color=None, max_length=None, show_value=None, **kw): """ Associative dict for storing property values """ self.container = {} """ DataBar - a model defined in Swagger """ self.container['direction'] = None self.container['max_cfvo'] = None self.container['color'] = None self.container['min_length'] = None self.container['bar_fill_type'] = None self.container['min_cfvo'] = None self.container['axis_position'] = None self.container['negative_bar_format'] = None self.container['bar_border'] = None self.container['axis_color'] = None self.container['max_length'] = None self.container['show_value'] = None if direction is not None: self.direction = direction if max_cfvo is not None: self.max_cfvo = max_cfvo if color is not None: self.color = color if min_length is not None: self.min_length = min_length if bar_fill_type is not None: self.bar_fill_type = bar_fill_type if min_cfvo is not None: self.min_cfvo = min_cfvo if axis_position is not None: self.axis_position = axis_position if negative_bar_format is not None: self.negative_bar_format = negative_bar_format if bar_border is not None: self.bar_border = bar_border if axis_color is not None: self.axis_color = axis_color if max_length is not None: self.max_length = max_length if show_value is not None: self.show_value = show_value @property def direction(self): """ Gets the direction of this DataBar. Gets or sets the direction the databar is displayed. :return: The direction of this DataBar. :rtype: str """ return self.container['direction'] @direction.setter def direction(self, direction): """ Sets the direction of this DataBar. Gets or sets the direction the databar is displayed. :param direction: The direction of this DataBar. :type: str """ self.container['direction'] = direction @property def max_cfvo(self): """ Gets the max_cfvo of this DataBar. Get or set this DataBar's max value object. Cannot set null or CFValueObject with type FormatConditionValueType.Min to it. :return: The max_cfvo of this DataBar. :rtype: ConditionalFormattingValue """ return self.container['max_cfvo'] @max_cfvo.setter def max_cfvo(self, max_cfvo): """ Sets the max_cfvo of this DataBar. Get or set this DataBar's max value object. Cannot set null or CFValueObject with type FormatConditionValueType.Min to it. :param max_cfvo: The max_cfvo of this DataBar. :type: ConditionalFormattingValue """ self.container['max_cfvo'] = max_cfvo @property def color(self): """ Gets the color of this DataBar. Get or set this DataBar's Color. :return: The color of this DataBar. :rtype: Color """ return self.container['color'] @color.setter def color(self, color): """ Sets the color of this DataBar. Get or set this DataBar's Color. :param color: The color of this DataBar. :type: Color """ self.container['color'] = color @property def min_length(self): """ Gets the min_length of this DataBar. Represents the min length of data bar . :return: The min_length of this DataBar. :rtype: int """ return self.container['min_length'] @min_length.setter def min_length(self, min_length): """ Sets the min_length of this DataBar. Represents the min length of data bar . :param min_length: The min_length of this DataBar. :type: int """ self.container['min_length'] = min_length @property def bar_fill_type(self): """ Gets the bar_fill_type of this DataBar. Gets or sets how a data bar is filled with color. :return: The bar_fill_type of this DataBar. :rtype: str """ return self.container['bar_fill_type'] @bar_fill_type.setter def bar_fill_type(self, bar_fill_type): """ Sets the bar_fill_type of this DataBar. Gets or sets how a data bar is filled with color. :param bar_fill_type: The bar_fill_type of this DataBar. :type: str """ self.container['bar_fill_type'] = bar_fill_type @property def min_cfvo(self): """ Gets the min_cfvo of this DataBar. Get or set this DataBar's min value object. Cannot set null or CFValueObject with type FormatConditionValueType.Max to it. :return: The min_cfvo of this DataBar. :rtype: ConditionalFormattingValue """ return self.container['min_cfvo'] @min_cfvo.setter def min_cfvo(self, min_cfvo): """ Sets the min_cfvo of this DataBar. Get or set this DataBar's min value object. Cannot set null or CFValueObject with type FormatConditionValueType.Max to it. :param min_cfvo: The min_cfvo of this DataBar. :type: ConditionalFormattingValue """ self.container['min_cfvo'] = min_cfvo @property def axis_position(self): """ Gets the axis_position of this DataBar. Gets or sets the position of the axis of the data bars specified by a conditional formatting rule. :return: The axis_position of this DataBar. :rtype: str """ return self.container['axis_position'] @axis_position.setter def axis_position(self, axis_position): """ Sets the axis_position of this DataBar. Gets or sets the position of the axis of the data bars specified by a conditional formatting rule. :param axis_position: The axis_position of this DataBar. :type: str """ self.container['axis_position'] = axis_position @property def negative_bar_format(self): """ Gets the negative_bar_format of this DataBar. Gets the NegativeBarFormat object associated with a data bar conditional formatting rule. :return: The negative_bar_format of this DataBar. :rtype: NegativeBarFormat """ return self.container['negative_bar_format'] @negative_bar_format.setter def negative_bar_format(self, negative_bar_format): """ Sets the negative_bar_format of this DataBar. Gets the NegativeBarFormat object associated with a data bar conditional formatting rule. :param negative_bar_format: The negative_bar_format of this DataBar. :type: NegativeBarFormat """ self.container['negative_bar_format'] = negative_bar_format @property def bar_border(self): """ Gets the bar_border of this DataBar. Gets an object that specifies the border of a data bar. :return: The bar_border of this DataBar. :rtype: DataBarBorder """ return self.container['bar_border'] @bar_border.setter def bar_border(self, bar_border): """ Sets the bar_border of this DataBar. Gets an object that specifies the border of a data bar. :param bar_border: The bar_border of this DataBar. :type: DataBarBorder """ self.container['bar_border'] = bar_border @property def axis_color(self): """ Gets the axis_color of this DataBar. Gets the color of the axis for cells with conditional formatting as data bars. :return: The axis_color of this DataBar. :rtype: Color """ return self.container['axis_color'] @axis_color.setter def axis_color(self, axis_color): """ Sets the axis_color of this DataBar. Gets the color of the axis for cells with conditional formatting as data bars. :param axis_color: The axis_color of this DataBar. :type: Color """ self.container['axis_color'] = axis_color @property def max_length(self): """ Gets the max_length of this DataBar. Represents the max length of data bar . :return: The max_length of this DataBar. :rtype: int """ return self.container['max_length'] @max_length.setter def max_length(self, max_length): """ Sets the max_length of this DataBar. Represents the max length of data bar . :param max_length: The max_length of this DataBar. :type: int """ self.container['max_length'] = max_length @property def show_value(self): """ Gets the show_value of this DataBar. Get or set the flag indicating whether to show the values of the cells on which this data bar is applied. Default value is true. :return: The show_value of this DataBar. :rtype: bool """ return self.container['show_value'] @show_value.setter def show_value(self, show_value): """ Sets the show_value of this DataBar. Get or set the flag indicating whether to show the values of the cells on which this data bar is applied. Default value is true. :param show_value: The show_value of this DataBar. :type: bool """ self.container['show_value'] = show_value def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.get_swagger_types()): value = self.get_from_container(attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, DataBar): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
en
0.657082
# coding: utf-8 Copyright (c) 2021 Aspose.Cells Cloud Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. Associative dict for storing property values DataBar - a model defined in Swagger Gets the direction of this DataBar. Gets or sets the direction the databar is displayed. :return: The direction of this DataBar. :rtype: str Sets the direction of this DataBar. Gets or sets the direction the databar is displayed. :param direction: The direction of this DataBar. :type: str Gets the max_cfvo of this DataBar. Get or set this DataBar's max value object. Cannot set null or CFValueObject with type FormatConditionValueType.Min to it. :return: The max_cfvo of this DataBar. :rtype: ConditionalFormattingValue Sets the max_cfvo of this DataBar. Get or set this DataBar's max value object. Cannot set null or CFValueObject with type FormatConditionValueType.Min to it. :param max_cfvo: The max_cfvo of this DataBar. :type: ConditionalFormattingValue Gets the color of this DataBar. Get or set this DataBar's Color. :return: The color of this DataBar. :rtype: Color Sets the color of this DataBar. Get or set this DataBar's Color. :param color: The color of this DataBar. :type: Color Gets the min_length of this DataBar. Represents the min length of data bar . :return: The min_length of this DataBar. :rtype: int Sets the min_length of this DataBar. Represents the min length of data bar . :param min_length: The min_length of this DataBar. :type: int Gets the bar_fill_type of this DataBar. Gets or sets how a data bar is filled with color. :return: The bar_fill_type of this DataBar. :rtype: str Sets the bar_fill_type of this DataBar. Gets or sets how a data bar is filled with color. :param bar_fill_type: The bar_fill_type of this DataBar. :type: str Gets the min_cfvo of this DataBar. Get or set this DataBar's min value object. Cannot set null or CFValueObject with type FormatConditionValueType.Max to it. :return: The min_cfvo of this DataBar. :rtype: ConditionalFormattingValue Sets the min_cfvo of this DataBar. Get or set this DataBar's min value object. Cannot set null or CFValueObject with type FormatConditionValueType.Max to it. :param min_cfvo: The min_cfvo of this DataBar. :type: ConditionalFormattingValue Gets the axis_position of this DataBar. Gets or sets the position of the axis of the data bars specified by a conditional formatting rule. :return: The axis_position of this DataBar. :rtype: str Sets the axis_position of this DataBar. Gets or sets the position of the axis of the data bars specified by a conditional formatting rule. :param axis_position: The axis_position of this DataBar. :type: str Gets the negative_bar_format of this DataBar. Gets the NegativeBarFormat object associated with a data bar conditional formatting rule. :return: The negative_bar_format of this DataBar. :rtype: NegativeBarFormat Sets the negative_bar_format of this DataBar. Gets the NegativeBarFormat object associated with a data bar conditional formatting rule. :param negative_bar_format: The negative_bar_format of this DataBar. :type: NegativeBarFormat Gets the bar_border of this DataBar. Gets an object that specifies the border of a data bar. :return: The bar_border of this DataBar. :rtype: DataBarBorder Sets the bar_border of this DataBar. Gets an object that specifies the border of a data bar. :param bar_border: The bar_border of this DataBar. :type: DataBarBorder Gets the axis_color of this DataBar. Gets the color of the axis for cells with conditional formatting as data bars. :return: The axis_color of this DataBar. :rtype: Color Sets the axis_color of this DataBar. Gets the color of the axis for cells with conditional formatting as data bars. :param axis_color: The axis_color of this DataBar. :type: Color Gets the max_length of this DataBar. Represents the max length of data bar . :return: The max_length of this DataBar. :rtype: int Sets the max_length of this DataBar. Represents the max length of data bar . :param max_length: The max_length of this DataBar. :type: int Gets the show_value of this DataBar. Get or set the flag indicating whether to show the values of the cells on which this data bar is applied. Default value is true. :return: The show_value of this DataBar. :rtype: bool Sets the show_value of this DataBar. Get or set the flag indicating whether to show the values of the cells on which this data bar is applied. Default value is true. :param show_value: The show_value of this DataBar. :type: bool Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal
1.558247
2
tests/features/test_not_modified.py
a-musing-moose/bonobo
0
6617630
<reponame>a-musing-moose/bonobo from bonobo.constants import NOT_MODIFIED from bonobo.util.testing import BufferingNodeExecutionContext def useless(*args, **kwargs): return NOT_MODIFIED def test_not_modified(): input_messages = [ ('foo', 'bar'), ('foo', 'baz'), ] with BufferingNodeExecutionContext(useless) as context: context.write_sync(*input_messages) assert context.get_buffer() == input_messages
from bonobo.constants import NOT_MODIFIED from bonobo.util.testing import BufferingNodeExecutionContext def useless(*args, **kwargs): return NOT_MODIFIED def test_not_modified(): input_messages = [ ('foo', 'bar'), ('foo', 'baz'), ] with BufferingNodeExecutionContext(useless) as context: context.write_sync(*input_messages) assert context.get_buffer() == input_messages
none
1
2.211999
2
src/questionnaires/models.py
panchyo0/questionnaire_dataDriving
0
6617631
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models from django.conf import settings # Questionnaire model. #Question model. class Question(models.Model): question_id=models.AutoField(primary_key=True) create_at=models.DateTimeField(auto_now_add=True,auto_now=False) question=models.CharField(max_length=5000,null=False,blank=False) def __str__(self): return self.question def __getitem__(self, key): return self #questionnaire has ManyToMany relation with question. class Questionnaire(models.Model): Id=models.AutoField(primary_key=True) name=models.CharField(max_length=1024) description=models.CharField(max_length=5000) is_open=models.BooleanField(default=False) start_date=models.DateTimeField(auto_now_add=True,auto_now=False) question_members=models.ManyToManyField(Question,through='Questionnaire_Question') def __str__(self): return self.name #related question and questionnaire according ID class Questionnaire_Question(models.Model): questionnaire_id=models.ForeignKey(Questionnaire) question_id=models.ForeignKey(Question) #answer model. class Answer(models.Model): answer_id=models.AutoField(primary_key=True) question=models.ForeignKey(Question) answer=models.CharField(max_length=5000,null=False,blank=False) def __str__(self): return self.answer
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models from django.conf import settings # Questionnaire model. #Question model. class Question(models.Model): question_id=models.AutoField(primary_key=True) create_at=models.DateTimeField(auto_now_add=True,auto_now=False) question=models.CharField(max_length=5000,null=False,blank=False) def __str__(self): return self.question def __getitem__(self, key): return self #questionnaire has ManyToMany relation with question. class Questionnaire(models.Model): Id=models.AutoField(primary_key=True) name=models.CharField(max_length=1024) description=models.CharField(max_length=5000) is_open=models.BooleanField(default=False) start_date=models.DateTimeField(auto_now_add=True,auto_now=False) question_members=models.ManyToManyField(Question,through='Questionnaire_Question') def __str__(self): return self.name #related question and questionnaire according ID class Questionnaire_Question(models.Model): questionnaire_id=models.ForeignKey(Questionnaire) question_id=models.ForeignKey(Question) #answer model. class Answer(models.Model): answer_id=models.AutoField(primary_key=True) question=models.ForeignKey(Question) answer=models.CharField(max_length=5000,null=False,blank=False) def __str__(self): return self.answer
en
0.844104
# -*- coding: utf-8 -*- # Questionnaire model. #Question model. #questionnaire has ManyToMany relation with question. #related question and questionnaire according ID #answer model.
2.413812
2
app/lib/wip/__init__.py
MichaelCurrin/twitterverse
10
6617632
<gh_stars>1-10 """ Initialization file for wip directory. """
""" Initialization file for wip directory. """
en
0.924339
Initialization file for wip directory.
1.126765
1
bcbio/variation/naming.py
a113n/bcbio-nextgen
418
6617633
"""Fix chromosome naming incompatibilities for common issues, like hg19/GRCh37. Fixes issues relating to chr1 versus 1 naming. Uses <NAME>'s great collection of contig mappings: https://github.com/dpryan79/ChromosomeMappings """ import os import requests from bcbio import utils from bcbio.bam import ref from bcbio.distributed.transaction import file_transaction from bcbio.variation import vcfutils # ## Cached results GMAP = {} # read_mapping("https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/GRCh37_ensembl2UCSC.txt") GMAP["hg19"] = {'GL000219.1': 'chrUn_gl000219', 'GL000192.1': 'chr1_gl000192_random', 'GL000236.1': 'chrUn_gl000236', 'GL000211.1': 'chrUn_gl000211', 'GL000234.1': 'chrUn_gl000234', '20': 'chr20', '21': 'chr21', '22': 'chr22', 'GL000196.1': 'chr8_gl000196_random', 'GL000213.1': 'chrUn_gl000213', 'GL000205.1': 'chr17_gl000205_random', '4': 'chr4', 'GL000222.1': 'chrUn_gl000222', 'GL000215.1': 'chrUn_gl000215', '8': 'chr8', 'GL000232.1': 'chrUn_gl000232', 'GL000242.1': 'chrUn_gl000242', 'GL000244.1': 'chrUn_gl000244', 'GL000223.1': 'chrUn_gl000223', 'GL000229.1': 'chrUn_gl000229', 'GL000240.1': 'chrUn_gl000240', 'X': 'chrX', 'GL000202.1': 'chr11_gl000202_random', 'GL000217.1': 'chrUn_gl000217', 'GL000200.1': 'chr9_gl000200_random', 'GL000230.1': 'chrUn_gl000230', 'GL000206.1': 'chr17_gl000206_random', 'HSCHR6_MHC_QBL': 'chr6_qbl_hap6', 'HSCHR6_MHC_MANN': 'chr6_mann_hap4', 'GL000237.1': 'chrUn_gl000237', 'GL000204.1': 'chr17_gl000204_random', 'GL000235.1': 'chrUn_gl000235', 'HSCHR6_MHC_APD': 'chr6_apd_hap1', 'HSCHR6_MHC_COX': 'chr6_cox_hap2', '3': 'chr3', '7': 'chr7', 'GL000233.1': 'chrUn_gl000233', 'GL000221.1': 'chrUn_gl000221', 'GL000220.1': 'chrUn_gl000220', 'GL000245.1': 'chrUn_gl000245', 'GL000228.1': 'chrUn_gl000228', 'GL000231.1': 'chrUn_gl000231', 'MT': 'chrM', 'HSCHR6_MHC_SSTO': 'chr6_ssto_hap7', 'GL000238.1': 'chrUn_gl000238', 'GL000195.1': 'chr7_gl000195_random', 'GL000249.1': 'chrUn_gl000249', '2': 'chr2', '6': 'chr6', 'GL000247.1': 'chrUn_gl000247', 'GL000199.1': 'chr9_gl000199_random', 'HSCHR6_MHC_DBB': 'chr6_dbb_hap3', 'GL000246.1': 'chrUn_gl000246', 'GL000225.1': 'chrUn_gl000225', 'HSCHR4_1': 'chr4_ctg9_hap1', 'GL000227.1': 'chrUn_gl000227', '11': 'chr11', '10': 'chr10', '13': 'chr13', '12': 'chr12', '15': 'chr15', '14': 'chr14', '17': 'chr17', '16': 'chr16', '19': 'chr19', '18': 'chr18', 'GL000193.1': 'chr4_gl000193_random', 'GL000210.1': 'chr21_gl000210_random', 'GL000239.1': 'chrUn_gl000239', 'GL000191.1': 'chr1_gl000191_random', 'HSCHR17_1': 'chr17_ctg5_hap1', 'GL000194.1': 'chr4_gl000194_random', 'GL000212.1': 'chrUn_gl000212', 'GL000248.1': 'chrUn_gl000248', 'GL000197.1': 'chr8_gl000197_random', '1': 'chr1', '5': 'chr5', 'GL000208.1': 'chr19_gl000208_random', '9': 'chr9', 'GL000214.1': 'chrUn_gl000214', 'GL000224.1': 'chrUn_gl000224', 'GL000243.1': 'chrUn_gl000243', 'HSCHR6_MHC_MCF': 'chr6_mcf_hap5', 'GL000209.1': 'chr19_gl000209_random', 'GL000203.1': 'chr17_gl000203_random', 'GL000226.1': 'chrUn_gl000226', 'GL000241.1': 'chrUn_gl000241', 'Y': 'chrY', 'GL000201.1': 'chr9_gl000201_random', 'GL000198.1': 'chr9_gl000198_random', 'GL000216.1': 'chrUn_gl000216', 'GL000218.1': 'chrUn_gl000218', 'GL000207.1': 'chr18_gl000207_random'} #read_mapping("https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/GRCh37_UCSC2ensembl.txt") GMAP["GRCh37"] = {'chr19_gl000208_random': 'GL000208.1', 'chr21_gl000210_random': 'GL000210.1', 'chr6_apd_hap1': 'HSCHR6_MHC_APD', 'chr13': '13', 'chr12': '12', 'chr11': '11', 'chr10': '10', 'chr17': '17', 'chr16': '16', 'chr15': '15', 'chr14': '14', 'chr19': '19', 'chr18': '18', 'chr9_gl000198_random': 'GL000198.1', 'chrUn_gl000239': 'GL000239.1', 'chrUn_gl000238': 'GL000238.1', 'chrUn_gl000233': 'GL000233.1', 'chrUn_gl000232': 'GL000232.1', 'chrUn_gl000231': 'GL000231.1', 'chrUn_gl000230': 'GL000230.1', 'chrUn_gl000237': 'GL000237.1', 'chrUn_gl000236': 'GL000236.1', 'chrUn_gl000235': 'GL000235.1', 'chrUn_gl000234': 'GL000234.1', 'chr6_qbl_hap6': 'HSCHR6_MHC_QBL', 'chr11_gl000202_random': 'GL000202.1', 'chr17_gl000206_random': 'GL000206.1', 'chr6_cox_hap2': 'HSCHR6_MHC_COX', 'chr4_gl000193_random': 'GL000193.1', 'chrUn_gl000248': 'GL000248.1', 'chrUn_gl000249': 'GL000249.1', 'chrUn_gl000246': 'GL000246.1', 'chrUn_gl000247': 'GL000247.1', 'chrUn_gl000244': 'GL000244.1', 'chrUn_gl000245': 'GL000245.1', 'chrUn_gl000242': 'GL000242.1', 'chrUn_gl000243': 'GL000243.1', 'chrUn_gl000240': 'GL000240.1', 'chrUn_gl000241': 'GL000241.1', 'chr17_gl000204_random': 'GL000204.1', 'chr17_ctg5_hap1': 'HSCHR17_1', 'chr17_gl000205_random': 'GL000205.1', 'chr9_gl000199_random': 'GL000199.1', 'chr9_gl000201_random': 'GL000201.1', 'chr8': '8', 'chr6_ssto_hap7': 'HSCHR6_MHC_SSTO', 'chr8_gl000197_random': 'GL000197.1', 'chr6_dbb_hap3': 'HSCHR6_MHC_DBB', 'chr7_gl000195_random': 'GL000195.1', 'chr1_gl000191_random': 'GL000191.1', 'chr4_ctg9_hap1': 'HSCHR4_1', 'chr3': '3', 'chr2': '2', 'chr1': '1', 'chr17_gl000203_random': 'GL000203.1', 'chrUn_gl000225': 'GL000225.1', 'chrY': 'Y', 'chrX': 'X', 'chr9_gl000200_random': 'GL000200.1', 'chr9': '9', 'chrM': 'MT', 'chr8_gl000196_random': 'GL000196.1', 'chr6_mann_hap4': 'HSCHR6_MHC_MANN', 'chrUn_gl000211': 'GL000211.1', 'chrUn_gl000213': 'GL000213.1', 'chrUn_gl000212': 'GL000212.1', 'chrUn_gl000215': 'GL000215.1', 'chrUn_gl000214': 'GL000214.1', 'chrUn_gl000217': 'GL000217.1', 'chrUn_gl000216': 'GL000216.1', 'chrUn_gl000219': 'GL000219.1', 'chrUn_gl000218': 'GL000218.1', 'chr19_gl000209_random': 'GL000209.1', 'chr22': '22', 'chr20': '20', 'chr21': '21', 'chr6_mcf_hap5': 'HSCHR6_MHC_MCF', 'chr7': '7', 'chr6': '6', 'chr5': '5', 'chr4': '4', 'chrUn_gl000228': 'GL000228.1', 'chrUn_gl000229': 'GL000229.1', 'chr1_gl000192_random': 'GL000192.1', 'chrUn_gl000224': 'GL000224.1', 'chr4_gl000194_random': 'GL000194.1', 'chrUn_gl000226': 'GL000226.1', 'chrUn_gl000227': 'GL000227.1', 'chrUn_gl000220': 'GL000220.1', 'chrUn_gl000221': 'GL000221.1', 'chrUn_gl000222': 'GL000222.1', 'chrUn_gl000223': 'GL000223.1', 'chr18_gl000207_random': 'GL000207.1'} def handle_synonyms(in_file, ref_file, genome_build, work_dir, data): """Potentially handle remapping synonymous chromosome names between builds. Handles tab delimited file formats like BED and VCF where the contig is in the first column. """ if genome_build in GMAP and ref_file: mappings = GMAP[genome_build] contigs = set([c.name for c in ref.file_contigs(ref_file)]) out_file = os.path.join(work_dir, "%s-fixed_contigs%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_exists(out_file): if out_file.endswith(".gz"): out_file = out_file.replace(".gz", "") needs_bgzip = True else: needs_bgzip = False checked_file = "%s.checked" % utils.splitext_plus(out_file)[0] if not _matches_contigs(in_file, contigs, checked_file): with file_transaction(data, out_file) as tx_out_file: _write_newname_file(in_file, tx_out_file, mappings) if needs_bgzip: out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file return in_file def _write_newname_file(in_file, out_file, mappings): """Re-write an input file with contigs matching the correct reference. """ with utils.open_gzipsafe(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.split("\t") new_contig = mappings.get(parts[0]) if new_contig: parts[0] = new_contig out_handle.write("\t".join(parts)) def _matches_contigs(in_file, contigs, checked_file): """Check if the contigs in the input file match the defined contigs in the reference genome. """ tocheck_contigs = 2 if utils.file_exists(checked_file): with open(checked_file) as in_handle: return in_handle.read().strip() == "match" else: with utils.open_gzipsafe(in_file) as in_handle: to_check = set([]) for line in in_handle: if not line.startswith("#"): to_check.add(line.split()[0]) if len(to_check) >= tocheck_contigs: break with open(checked_file, "w") as out_handle: if any([c not in contigs for c in to_check]): out_handle.write("different") return False else: out_handle.write("match") return True # ## Retrieval of mappings def read_mapping(url): mappings = {} for line in requests.get(url).text.split("\n"): parts = line.strip().split() if len(parts) == 2: first, second = parts mappings[str(first)] = str(second) return mappings
"""Fix chromosome naming incompatibilities for common issues, like hg19/GRCh37. Fixes issues relating to chr1 versus 1 naming. Uses <NAME>'s great collection of contig mappings: https://github.com/dpryan79/ChromosomeMappings """ import os import requests from bcbio import utils from bcbio.bam import ref from bcbio.distributed.transaction import file_transaction from bcbio.variation import vcfutils # ## Cached results GMAP = {} # read_mapping("https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/GRCh37_ensembl2UCSC.txt") GMAP["hg19"] = {'GL000219.1': 'chrUn_gl000219', 'GL000192.1': 'chr1_gl000192_random', 'GL000236.1': 'chrUn_gl000236', 'GL000211.1': 'chrUn_gl000211', 'GL000234.1': 'chrUn_gl000234', '20': 'chr20', '21': 'chr21', '22': 'chr22', 'GL000196.1': 'chr8_gl000196_random', 'GL000213.1': 'chrUn_gl000213', 'GL000205.1': 'chr17_gl000205_random', '4': 'chr4', 'GL000222.1': 'chrUn_gl000222', 'GL000215.1': 'chrUn_gl000215', '8': 'chr8', 'GL000232.1': 'chrUn_gl000232', 'GL000242.1': 'chrUn_gl000242', 'GL000244.1': 'chrUn_gl000244', 'GL000223.1': 'chrUn_gl000223', 'GL000229.1': 'chrUn_gl000229', 'GL000240.1': 'chrUn_gl000240', 'X': 'chrX', 'GL000202.1': 'chr11_gl000202_random', 'GL000217.1': 'chrUn_gl000217', 'GL000200.1': 'chr9_gl000200_random', 'GL000230.1': 'chrUn_gl000230', 'GL000206.1': 'chr17_gl000206_random', 'HSCHR6_MHC_QBL': 'chr6_qbl_hap6', 'HSCHR6_MHC_MANN': 'chr6_mann_hap4', 'GL000237.1': 'chrUn_gl000237', 'GL000204.1': 'chr17_gl000204_random', 'GL000235.1': 'chrUn_gl000235', 'HSCHR6_MHC_APD': 'chr6_apd_hap1', 'HSCHR6_MHC_COX': 'chr6_cox_hap2', '3': 'chr3', '7': 'chr7', 'GL000233.1': 'chrUn_gl000233', 'GL000221.1': 'chrUn_gl000221', 'GL000220.1': 'chrUn_gl000220', 'GL000245.1': 'chrUn_gl000245', 'GL000228.1': 'chrUn_gl000228', 'GL000231.1': 'chrUn_gl000231', 'MT': 'chrM', 'HSCHR6_MHC_SSTO': 'chr6_ssto_hap7', 'GL000238.1': 'chrUn_gl000238', 'GL000195.1': 'chr7_gl000195_random', 'GL000249.1': 'chrUn_gl000249', '2': 'chr2', '6': 'chr6', 'GL000247.1': 'chrUn_gl000247', 'GL000199.1': 'chr9_gl000199_random', 'HSCHR6_MHC_DBB': 'chr6_dbb_hap3', 'GL000246.1': 'chrUn_gl000246', 'GL000225.1': 'chrUn_gl000225', 'HSCHR4_1': 'chr4_ctg9_hap1', 'GL000227.1': 'chrUn_gl000227', '11': 'chr11', '10': 'chr10', '13': 'chr13', '12': 'chr12', '15': 'chr15', '14': 'chr14', '17': 'chr17', '16': 'chr16', '19': 'chr19', '18': 'chr18', 'GL000193.1': 'chr4_gl000193_random', 'GL000210.1': 'chr21_gl000210_random', 'GL000239.1': 'chrUn_gl000239', 'GL000191.1': 'chr1_gl000191_random', 'HSCHR17_1': 'chr17_ctg5_hap1', 'GL000194.1': 'chr4_gl000194_random', 'GL000212.1': 'chrUn_gl000212', 'GL000248.1': 'chrUn_gl000248', 'GL000197.1': 'chr8_gl000197_random', '1': 'chr1', '5': 'chr5', 'GL000208.1': 'chr19_gl000208_random', '9': 'chr9', 'GL000214.1': 'chrUn_gl000214', 'GL000224.1': 'chrUn_gl000224', 'GL000243.1': 'chrUn_gl000243', 'HSCHR6_MHC_MCF': 'chr6_mcf_hap5', 'GL000209.1': 'chr19_gl000209_random', 'GL000203.1': 'chr17_gl000203_random', 'GL000226.1': 'chrUn_gl000226', 'GL000241.1': 'chrUn_gl000241', 'Y': 'chrY', 'GL000201.1': 'chr9_gl000201_random', 'GL000198.1': 'chr9_gl000198_random', 'GL000216.1': 'chrUn_gl000216', 'GL000218.1': 'chrUn_gl000218', 'GL000207.1': 'chr18_gl000207_random'} #read_mapping("https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/GRCh37_UCSC2ensembl.txt") GMAP["GRCh37"] = {'chr19_gl000208_random': 'GL000208.1', 'chr21_gl000210_random': 'GL000210.1', 'chr6_apd_hap1': 'HSCHR6_MHC_APD', 'chr13': '13', 'chr12': '12', 'chr11': '11', 'chr10': '10', 'chr17': '17', 'chr16': '16', 'chr15': '15', 'chr14': '14', 'chr19': '19', 'chr18': '18', 'chr9_gl000198_random': 'GL000198.1', 'chrUn_gl000239': 'GL000239.1', 'chrUn_gl000238': 'GL000238.1', 'chrUn_gl000233': 'GL000233.1', 'chrUn_gl000232': 'GL000232.1', 'chrUn_gl000231': 'GL000231.1', 'chrUn_gl000230': 'GL000230.1', 'chrUn_gl000237': 'GL000237.1', 'chrUn_gl000236': 'GL000236.1', 'chrUn_gl000235': 'GL000235.1', 'chrUn_gl000234': 'GL000234.1', 'chr6_qbl_hap6': 'HSCHR6_MHC_QBL', 'chr11_gl000202_random': 'GL000202.1', 'chr17_gl000206_random': 'GL000206.1', 'chr6_cox_hap2': 'HSCHR6_MHC_COX', 'chr4_gl000193_random': 'GL000193.1', 'chrUn_gl000248': 'GL000248.1', 'chrUn_gl000249': 'GL000249.1', 'chrUn_gl000246': 'GL000246.1', 'chrUn_gl000247': 'GL000247.1', 'chrUn_gl000244': 'GL000244.1', 'chrUn_gl000245': 'GL000245.1', 'chrUn_gl000242': 'GL000242.1', 'chrUn_gl000243': 'GL000243.1', 'chrUn_gl000240': 'GL000240.1', 'chrUn_gl000241': 'GL000241.1', 'chr17_gl000204_random': 'GL000204.1', 'chr17_ctg5_hap1': 'HSCHR17_1', 'chr17_gl000205_random': 'GL000205.1', 'chr9_gl000199_random': 'GL000199.1', 'chr9_gl000201_random': 'GL000201.1', 'chr8': '8', 'chr6_ssto_hap7': 'HSCHR6_MHC_SSTO', 'chr8_gl000197_random': 'GL000197.1', 'chr6_dbb_hap3': 'HSCHR6_MHC_DBB', 'chr7_gl000195_random': 'GL000195.1', 'chr1_gl000191_random': 'GL000191.1', 'chr4_ctg9_hap1': 'HSCHR4_1', 'chr3': '3', 'chr2': '2', 'chr1': '1', 'chr17_gl000203_random': 'GL000203.1', 'chrUn_gl000225': 'GL000225.1', 'chrY': 'Y', 'chrX': 'X', 'chr9_gl000200_random': 'GL000200.1', 'chr9': '9', 'chrM': 'MT', 'chr8_gl000196_random': 'GL000196.1', 'chr6_mann_hap4': 'HSCHR6_MHC_MANN', 'chrUn_gl000211': 'GL000211.1', 'chrUn_gl000213': 'GL000213.1', 'chrUn_gl000212': 'GL000212.1', 'chrUn_gl000215': 'GL000215.1', 'chrUn_gl000214': 'GL000214.1', 'chrUn_gl000217': 'GL000217.1', 'chrUn_gl000216': 'GL000216.1', 'chrUn_gl000219': 'GL000219.1', 'chrUn_gl000218': 'GL000218.1', 'chr19_gl000209_random': 'GL000209.1', 'chr22': '22', 'chr20': '20', 'chr21': '21', 'chr6_mcf_hap5': 'HSCHR6_MHC_MCF', 'chr7': '7', 'chr6': '6', 'chr5': '5', 'chr4': '4', 'chrUn_gl000228': 'GL000228.1', 'chrUn_gl000229': 'GL000229.1', 'chr1_gl000192_random': 'GL000192.1', 'chrUn_gl000224': 'GL000224.1', 'chr4_gl000194_random': 'GL000194.1', 'chrUn_gl000226': 'GL000226.1', 'chrUn_gl000227': 'GL000227.1', 'chrUn_gl000220': 'GL000220.1', 'chrUn_gl000221': 'GL000221.1', 'chrUn_gl000222': 'GL000222.1', 'chrUn_gl000223': 'GL000223.1', 'chr18_gl000207_random': 'GL000207.1'} def handle_synonyms(in_file, ref_file, genome_build, work_dir, data): """Potentially handle remapping synonymous chromosome names between builds. Handles tab delimited file formats like BED and VCF where the contig is in the first column. """ if genome_build in GMAP and ref_file: mappings = GMAP[genome_build] contigs = set([c.name for c in ref.file_contigs(ref_file)]) out_file = os.path.join(work_dir, "%s-fixed_contigs%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_exists(out_file): if out_file.endswith(".gz"): out_file = out_file.replace(".gz", "") needs_bgzip = True else: needs_bgzip = False checked_file = "%s.checked" % utils.splitext_plus(out_file)[0] if not _matches_contigs(in_file, contigs, checked_file): with file_transaction(data, out_file) as tx_out_file: _write_newname_file(in_file, tx_out_file, mappings) if needs_bgzip: out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file return in_file def _write_newname_file(in_file, out_file, mappings): """Re-write an input file with contigs matching the correct reference. """ with utils.open_gzipsafe(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.split("\t") new_contig = mappings.get(parts[0]) if new_contig: parts[0] = new_contig out_handle.write("\t".join(parts)) def _matches_contigs(in_file, contigs, checked_file): """Check if the contigs in the input file match the defined contigs in the reference genome. """ tocheck_contigs = 2 if utils.file_exists(checked_file): with open(checked_file) as in_handle: return in_handle.read().strip() == "match" else: with utils.open_gzipsafe(in_file) as in_handle: to_check = set([]) for line in in_handle: if not line.startswith("#"): to_check.add(line.split()[0]) if len(to_check) >= tocheck_contigs: break with open(checked_file, "w") as out_handle: if any([c not in contigs for c in to_check]): out_handle.write("different") return False else: out_handle.write("match") return True # ## Retrieval of mappings def read_mapping(url): mappings = {} for line in requests.get(url).text.split("\n"): parts = line.strip().split() if len(parts) == 2: first, second = parts mappings[str(first)] = str(second) return mappings
en
0.753927
Fix chromosome naming incompatibilities for common issues, like hg19/GRCh37. Fixes issues relating to chr1 versus 1 naming. Uses <NAME>'s great collection of contig mappings: https://github.com/dpryan79/ChromosomeMappings # ## Cached results # read_mapping("https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/GRCh37_ensembl2UCSC.txt") #read_mapping("https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/GRCh37_UCSC2ensembl.txt") Potentially handle remapping synonymous chromosome names between builds. Handles tab delimited file formats like BED and VCF where the contig is in the first column. Re-write an input file with contigs matching the correct reference. Check if the contigs in the input file match the defined contigs in the reference genome. # ## Retrieval of mappings
2.057254
2
tests/test_utils.py
WadeBarnes/aries-staticagent-python
0
6617634
<reponame>WadeBarnes/aries-staticagent-python<gh_stars>0 """ Test utilities. """ import re from aries_staticagent import utils REGEX = r'^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9]) (2[0-3]|[01][0-9]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$' MATCH = re.compile(REGEX).match def test_timestamp(): """ Test that the timestamp looks right. """ timestamp = utils.timestamp() assert MATCH(timestamp)
""" Test utilities. """ import re from aries_staticagent import utils REGEX = r'^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9]) (2[0-3]|[01][0-9]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$' MATCH = re.compile(REGEX).match def test_timestamp(): """ Test that the timestamp looks right. """ timestamp = utils.timestamp() assert MATCH(timestamp)
en
0.966896
Test utilities. Test that the timestamp looks right.
2.728053
3
powerpool/jobmanagers/switching_jobmanager.py
uingei/powerpool_20171208
39
6617635
import redis import simplejson import time import operator from cryptokit import bits_to_difficulty from gevent.event import Event from powerpool.lib import loop from powerpool.jobmanagers import Jobmanager from binascii import hexlify class MonitorNetworkMulti(Jobmanager): defaults = config = dict(jobmanagers=None, profit_poll_int=1, redis={}, margin_switch=1.2, exchange_manager={}) def __init__(self, config): self._configure(config) # Since some MonitorNetwork objs are polling and some aren't.... self.gl_methods = ['update_profit'] # Child jobmanagers self.jobmanagers = {} self.price_data = {} self.profit_data = {} self.next_network = None self.current_network = None # Currently active jobs keyed by their unique ID self.jobs = {} self.new_job = Event() self.redis = redis.Redis(**self.config['redis']) @property def latest_job(self): """ Proxy the jobmanager we're currently mining ons job """ return self.jobmanagers[self.current_network].latest_job @property def status(self): """ For display in the http monitor """ return dict(price_data=self.price_data, profit_data=self.profit_data, next_network=self.next_network, current_network=self.current_network) @loop(interval='profit_poll_int') def update_profit(self): """ Continually check redis for new profit information """ # Acessing Redis can cause greenlet switches because new jobs. We don't # want to potentially switch jobs multiple times quickly, so we update # the profitability information all at once after the loop to avoid # multiple network switches new_price_data = {} for manager in self.jobmanagers.itervalues(): currency = manager.config['currency'] pscore = self.redis.get("{}_profit".format(currency)) # Deserialize if pscore: try: pscore = simplejson.loads(pscore, use_decimal=True) except Exception: self.logger.warn( "Error parsing profit score for {}! Setting it to 0.." .format(currency)) pscore = 0 pass # If no score was grabbed, pass a 0 value score else: self.logger.warn("Unable to grab profit info for {}!" .format(currency)) pscore = 0 ratio = self.redis.get("{}_ratio".format(currency)) or 1.0 ratio = float(ratio) # Only set updated if it actually changed if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio: new_price_data[currency] = (pscore, ratio, time.time()) # If we have some new information, adjust accordingly if new_price_data: self.logger.info("Updated price information for {}" .format(new_price_data.keys())) # Atomic update in gevent self.price_data.update(new_price_data) # Update all the profit info. No preemption, just maths for currency in self.jobmanagers.iterkeys(): self.update_profitability(currency) self.logger.debug( "Re-checking best network after new price data for {}" .format(new_price_data.keys())) self.check_best() def check_best(self): """ Assuming that `profit_data` is completely up to date, evaluate the most profitable network and switch immediately if there's a big enough difference. Otherwise set it to be changed at next block notification. """ # Get the most profitable network based on our current data new_best = max(self.profit_data.iteritems(), key=operator.itemgetter(1))[0] if self.current_network is None: self.logger.info( "No active network, so switching to {} with profit of {:,.4f}" .format(new_best, self.profit_data[new_best])) self.next_network = new_best self.switch_network() return # If the currently most profitable network is 120% the profitability # of what we're mining on, we should switch immediately margin_switch = self.config['margin_switch'] if (margin_switch and self.profit_data[self.next_network] > (self.profit_data[self.current_network] * margin_switch)): self.logger.info( "Network {} {:,.4f} now more profitable than current network " "{} {:,.4f} by a fair margin. Switching NOW." .format(new_best, self.profit_data[new_best], self.current_network, self.profit_data[self.current_network])) self.next_network = new_best self.switch_network() return if new_best != self.next_network: self.logger.info( "Network {} {:,.4f} now more profitable than current best " "{} {:,.4f}. Switching on next block from current network {}." .format(new_best, self.profit_data[new_best], self.next_network, self.profit_data[self.next_network], self.current_network)) self.next_network = new_best return self.logger.debug("Network {} {:,.4f} still most profitable" .format(new_best, self.profit_data[new_best])) def switch_network(self): """ Pushes a network change to the user if it's needed """ if self.next_network != self.current_network: job = self.jobmanagers[self.next_network].latest_job if job is None: self.logger.error( "Tried to switch network to {} that has no job!" .format(self.next_network)) return if self.current_network: self.logger.info( "Switching from {} {:,.4f} -> {} {:,.4f} and pushing job NOW" .format(self.current_network, self.profit_data[self.current_network], self.next_network, self.profit_data[self.next_network])) self.current_network = self.next_network job.type = 0 self.new_job.job = job self.new_job.set() self.new_job.clear() return True return False def update_profitability(self, currency): """ Recalculates the profitability for a specific currency """ jobmanager = self.jobmanagers[currency] last_job = jobmanager.latest_job pscore, ratio, _ = self.price_data[currency] # We can't update if we don't have a job and profit data if last_job is None or pscore is None: return False max_blockheight = jobmanager.config['max_blockheight'] if max_blockheight is not None and last_job.block_height >= max_blockheight: self.profit_data[currency] = 0 self.logger.debug( "{} height {} is >= the configured maximum blockheight of {}, " "setting profitability to 0." .format(currency, last_job.block_height, max_blockheight)) return True block_value = last_job.total_value / 100000000.0 diff = bits_to_difficulty(hexlify(last_job.bits)) self.profit_data[currency] = (block_value * float(pscore) / diff) * ratio * 1000000 self.logger.debug( "Updating {} profit data;\n\tblock_value {};\n\tavg_price {:,.8f}" ";\n\tdiff {};\n\tratio {};\n\tresult {}" .format(currency, block_value, float(pscore), diff, ratio, self.profit_data[currency])) self.manager.log_event("{name}.profitability.{curr}:{metric}|g" .format(name=self.manager.config['procname'], curr=currency, metric=self.profit_data[currency])) return True def new_job_notif(self, event): currency = event.job.currency flush = event.job.type == 0 if currency == self.current_network: self.logger.info("Recieved new job on most profitable network {}" .format(currency)) # See if we need to switch now that we're done with that block. If # not, push a new job on this network if not self.switch_network(): self.new_job.job = event.job self.new_job.set() self.new_job.clear() # If we're recieving a new block then diff has changed, so update the # network profit and recompute best network if flush and self.update_profitability(currency): self.logger.debug("Re-checking best network after new job from {}" .format(currency)) self.check_best() def start(self): Jobmanager.start(self) self.config['jobmanagers'] = set(self.config['jobmanagers']) found_managers = set() for manager in self.manager.component_types['Jobmanager']: if manager.key in self.config['jobmanagers']: currency = manager.config['currency'] self.jobmanagers[currency] = manager self.profit_data[currency] = 0 self.price_data[currency] = (None, None, None) found_managers.add(manager.key) manager.new_job.rawlink(self.new_job_notif) for monitor in self.config['jobmanagers'] - found_managers: self.logger.error("Unable to locate Jobmanager(s) '{}'".format(monitor))
import redis import simplejson import time import operator from cryptokit import bits_to_difficulty from gevent.event import Event from powerpool.lib import loop from powerpool.jobmanagers import Jobmanager from binascii import hexlify class MonitorNetworkMulti(Jobmanager): defaults = config = dict(jobmanagers=None, profit_poll_int=1, redis={}, margin_switch=1.2, exchange_manager={}) def __init__(self, config): self._configure(config) # Since some MonitorNetwork objs are polling and some aren't.... self.gl_methods = ['update_profit'] # Child jobmanagers self.jobmanagers = {} self.price_data = {} self.profit_data = {} self.next_network = None self.current_network = None # Currently active jobs keyed by their unique ID self.jobs = {} self.new_job = Event() self.redis = redis.Redis(**self.config['redis']) @property def latest_job(self): """ Proxy the jobmanager we're currently mining ons job """ return self.jobmanagers[self.current_network].latest_job @property def status(self): """ For display in the http monitor """ return dict(price_data=self.price_data, profit_data=self.profit_data, next_network=self.next_network, current_network=self.current_network) @loop(interval='profit_poll_int') def update_profit(self): """ Continually check redis for new profit information """ # Acessing Redis can cause greenlet switches because new jobs. We don't # want to potentially switch jobs multiple times quickly, so we update # the profitability information all at once after the loop to avoid # multiple network switches new_price_data = {} for manager in self.jobmanagers.itervalues(): currency = manager.config['currency'] pscore = self.redis.get("{}_profit".format(currency)) # Deserialize if pscore: try: pscore = simplejson.loads(pscore, use_decimal=True) except Exception: self.logger.warn( "Error parsing profit score for {}! Setting it to 0.." .format(currency)) pscore = 0 pass # If no score was grabbed, pass a 0 value score else: self.logger.warn("Unable to grab profit info for {}!" .format(currency)) pscore = 0 ratio = self.redis.get("{}_ratio".format(currency)) or 1.0 ratio = float(ratio) # Only set updated if it actually changed if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio: new_price_data[currency] = (pscore, ratio, time.time()) # If we have some new information, adjust accordingly if new_price_data: self.logger.info("Updated price information for {}" .format(new_price_data.keys())) # Atomic update in gevent self.price_data.update(new_price_data) # Update all the profit info. No preemption, just maths for currency in self.jobmanagers.iterkeys(): self.update_profitability(currency) self.logger.debug( "Re-checking best network after new price data for {}" .format(new_price_data.keys())) self.check_best() def check_best(self): """ Assuming that `profit_data` is completely up to date, evaluate the most profitable network and switch immediately if there's a big enough difference. Otherwise set it to be changed at next block notification. """ # Get the most profitable network based on our current data new_best = max(self.profit_data.iteritems(), key=operator.itemgetter(1))[0] if self.current_network is None: self.logger.info( "No active network, so switching to {} with profit of {:,.4f}" .format(new_best, self.profit_data[new_best])) self.next_network = new_best self.switch_network() return # If the currently most profitable network is 120% the profitability # of what we're mining on, we should switch immediately margin_switch = self.config['margin_switch'] if (margin_switch and self.profit_data[self.next_network] > (self.profit_data[self.current_network] * margin_switch)): self.logger.info( "Network {} {:,.4f} now more profitable than current network " "{} {:,.4f} by a fair margin. Switching NOW." .format(new_best, self.profit_data[new_best], self.current_network, self.profit_data[self.current_network])) self.next_network = new_best self.switch_network() return if new_best != self.next_network: self.logger.info( "Network {} {:,.4f} now more profitable than current best " "{} {:,.4f}. Switching on next block from current network {}." .format(new_best, self.profit_data[new_best], self.next_network, self.profit_data[self.next_network], self.current_network)) self.next_network = new_best return self.logger.debug("Network {} {:,.4f} still most profitable" .format(new_best, self.profit_data[new_best])) def switch_network(self): """ Pushes a network change to the user if it's needed """ if self.next_network != self.current_network: job = self.jobmanagers[self.next_network].latest_job if job is None: self.logger.error( "Tried to switch network to {} that has no job!" .format(self.next_network)) return if self.current_network: self.logger.info( "Switching from {} {:,.4f} -> {} {:,.4f} and pushing job NOW" .format(self.current_network, self.profit_data[self.current_network], self.next_network, self.profit_data[self.next_network])) self.current_network = self.next_network job.type = 0 self.new_job.job = job self.new_job.set() self.new_job.clear() return True return False def update_profitability(self, currency): """ Recalculates the profitability for a specific currency """ jobmanager = self.jobmanagers[currency] last_job = jobmanager.latest_job pscore, ratio, _ = self.price_data[currency] # We can't update if we don't have a job and profit data if last_job is None or pscore is None: return False max_blockheight = jobmanager.config['max_blockheight'] if max_blockheight is not None and last_job.block_height >= max_blockheight: self.profit_data[currency] = 0 self.logger.debug( "{} height {} is >= the configured maximum blockheight of {}, " "setting profitability to 0." .format(currency, last_job.block_height, max_blockheight)) return True block_value = last_job.total_value / 100000000.0 diff = bits_to_difficulty(hexlify(last_job.bits)) self.profit_data[currency] = (block_value * float(pscore) / diff) * ratio * 1000000 self.logger.debug( "Updating {} profit data;\n\tblock_value {};\n\tavg_price {:,.8f}" ";\n\tdiff {};\n\tratio {};\n\tresult {}" .format(currency, block_value, float(pscore), diff, ratio, self.profit_data[currency])) self.manager.log_event("{name}.profitability.{curr}:{metric}|g" .format(name=self.manager.config['procname'], curr=currency, metric=self.profit_data[currency])) return True def new_job_notif(self, event): currency = event.job.currency flush = event.job.type == 0 if currency == self.current_network: self.logger.info("Recieved new job on most profitable network {}" .format(currency)) # See if we need to switch now that we're done with that block. If # not, push a new job on this network if not self.switch_network(): self.new_job.job = event.job self.new_job.set() self.new_job.clear() # If we're recieving a new block then diff has changed, so update the # network profit and recompute best network if flush and self.update_profitability(currency): self.logger.debug("Re-checking best network after new job from {}" .format(currency)) self.check_best() def start(self): Jobmanager.start(self) self.config['jobmanagers'] = set(self.config['jobmanagers']) found_managers = set() for manager in self.manager.component_types['Jobmanager']: if manager.key in self.config['jobmanagers']: currency = manager.config['currency'] self.jobmanagers[currency] = manager self.profit_data[currency] = 0 self.price_data[currency] = (None, None, None) found_managers.add(manager.key) manager.new_job.rawlink(self.new_job_notif) for monitor in self.config['jobmanagers'] - found_managers: self.logger.error("Unable to locate Jobmanager(s) '{}'".format(monitor))
en
0.908848
# Since some MonitorNetwork objs are polling and some aren't.... # Child jobmanagers # Currently active jobs keyed by their unique ID Proxy the jobmanager we're currently mining ons job For display in the http monitor Continually check redis for new profit information # Acessing Redis can cause greenlet switches because new jobs. We don't # want to potentially switch jobs multiple times quickly, so we update # the profitability information all at once after the loop to avoid # multiple network switches # Deserialize # If no score was grabbed, pass a 0 value score # Only set updated if it actually changed # If we have some new information, adjust accordingly # Atomic update in gevent # Update all the profit info. No preemption, just maths Assuming that `profit_data` is completely up to date, evaluate the most profitable network and switch immediately if there's a big enough difference. Otherwise set it to be changed at next block notification. # Get the most profitable network based on our current data # If the currently most profitable network is 120% the profitability # of what we're mining on, we should switch immediately Pushes a network change to the user if it's needed Recalculates the profitability for a specific currency # We can't update if we don't have a job and profit data # See if we need to switch now that we're done with that block. If # not, push a new job on this network # If we're recieving a new block then diff has changed, so update the # network profit and recompute best network
2.43168
2
src/indriya_python_client/experimot_zmq_plot_client.py
praveenv4k/Indriya
1
6617636
<reponame>praveenv4k/Indriya<filename>src/indriya_python_client/experimot_zmq_plot_client.py<gh_stars>1-10 __author__ = '<NAME>' __copyright__ = "Copyright 2015, GVLab" __credits__ = ["<NAME>"] __license__ = "MIT" __version__ = "0.0.1" __email__ = "<EMAIL>" __status__ = "Development" # Standard libraries import socket import json import sys import thread import threading import time import math # Additional dependencis # ZeroMQ Python bindings - http://zeromq.org/bindings:python import zmq # matplot library - http://matplotlib.org/ import matplotlib.pyplot as plt import matplotlib.transforms as tfm import matplotlib.patches as patches import matplotlib.animation as animation # transformations.py - http://www.lfd.uci.edu/~gohlke/code/transformations.py.html import transformations # some global variables for positions and orientations pos = [0.0,0.0,0.0] orient = [1.0,0.0,0.0,0.0] pose = [0.0,0.0,0.0] ############################################################################################################# # Localization server - A sample server for testing purpose def localization_server(ip,port): context = zmq.Context() socket = context.socket(zmq.REP) socket.bind(("tcp://*:%d" % port)) #dummy = { 'pos' : {'x':'1000','y':'1000','z':'1000'}, 'orient': {'w':'1','x':'0','y':'0','z':'0'}}; dummy = { 'pose' : {'x':'1','y':'1','alpha':'0'}}; while True: # Wait for next request from client message = socket.recv() #print("Received request: %s" % message) # Do some 'work' time.sleep(0.050) # Send reply back to client socket.send(json.dumps(dummy)) print "quitting ... " ############################################################################################################# # Localization client def localization_client(lock,ip,port): context = zmq.Context() # Socket to talk to server print "Connecting to localization server ... " socket = context.socket(zmq.REQ) socket.connect(("tcp://localhost:%d" % port)) # Do 10 requests, waiting each time for a response while True: # print("Sending request %s ... " % request) socket.send(b"pose") # Get the reply. str = socket.recv(1024) print str result = json.loads(str) # printing the result # print(result) # In order to access position : (result["pos"]["x"],result["pos"]["y"],result["pos"]["z"]) # In order to access orientation : (result["orient"]["w"],result["orient"]["x"],result["orient"]["y"],result["orient"]["z"]) lock.acquire() #global pos, orient #pos = [float(result["pos"]["x"]),float(result["pos"]["y"]),float(result["pos"]["z"])] #orient = [float(result["orient"]["w"]),float(result["orient"]["x"]),float(result["orient"]["y"]),float(result["orient"]["z"])] global pose #pose = [float(result["pose"]["x"]),float(result["pose"]["y"]),float(result["pose"]["alpha"])] pose = [float(result["pos"]["x"]),float(result["pos"]["y"]),float(result["orient"]["z"])] lock.release() #print "Position : " , pos #print "Orientation : " , orient # wait for a while time.sleep(0.050) ############################################################################################################# # Plotting function def plot_robot_pose(interval, lock): # Creating a figure F = plt.figure(1,(10,10)) # Get an instance to axis ax = plt.gca() # clear things for fresh plot ax.cla() # change default range - approximate localization range ax.set_xlim((-0.5,5.5)) ax.set_ylim((-3,3)) # set the titles ax.set_title('Localization on XZ Plane') ax.set_xlabel('Z Axis (m)') ax.set_ylabel('X Axis (m)') # get a handle to the figure fig = plt.gcf() angle = 0 wedge1= patches.Wedge((3,0),0.5,angle-15,angle+15) # Kinect Camera points = [[0, 0.3], [0, -0.3], [-0.3, 0]] polygon = plt.Polygon(points) fig.gca().add_artist(polygon) # Camera origin camera_origin=plt.Circle((0,0),0.05,color='r') fig.gca().add_artist(camera_origin) # Camera Text annotation ax.text(-0.4, -0.5, 'KINECT', fontsize=18) # Draw stuff plt.draw() # enable grid plt.grid() def init(): wedge1.center = (3, 0) ax.add_patch(wedge1) return wedge1, def animate(i): lock.acquire() local_pos = pose[0:2]; local_orient = pose[2] lock.release() #local_pos = [x/1000 for x in local_pos] #euler = transformations.euler_from_quaternion(local_orient,axes='syxz') #euler = [math.degrees(x) for x in euler] #deg = euler[1] deg = math.degrees(local_orient) #x,y = wedge1.center x = local_pos[0] y = local_pos[1] wedge1.set_center((x,y)) wedge1.set_theta1(deg - 15) wedge1.set_theta2(deg + 15) #print "Hello : (%d,%d);(%d,%d)" % (x,y,theta1,theta2) print "Position : " , local_pos print "Orientation : " , local_orient print "Euler : " , deg return wedge1, an1 = animation.FuncAnimation(fig,animate,init_func=init,interval=interval) plt.show() ############################################################################################################# # Main function if __name__ == "__main__": # port port = 5700 # ip address ip = "localhost" try: # create a lock object for synchronization lock = thread.allocate_lock() # Starting the plotting thread interval = 500; thread.start_new_thread(plot_robot_pose,(interval,lock)); # Responder (server) - uncomment to check with the local server #thread.start_new_thread(localization_server,(ip,port)); # Requester (client) thread.start_new_thread(localization_client,(lock,ip,port)); except: # printing on exception print "Exception occured : ", sys.exc_info() # do nothing in the main thread while 1: pass
__author__ = '<NAME>' __copyright__ = "Copyright 2015, GVLab" __credits__ = ["<NAME>"] __license__ = "MIT" __version__ = "0.0.1" __email__ = "<EMAIL>" __status__ = "Development" # Standard libraries import socket import json import sys import thread import threading import time import math # Additional dependencis # ZeroMQ Python bindings - http://zeromq.org/bindings:python import zmq # matplot library - http://matplotlib.org/ import matplotlib.pyplot as plt import matplotlib.transforms as tfm import matplotlib.patches as patches import matplotlib.animation as animation # transformations.py - http://www.lfd.uci.edu/~gohlke/code/transformations.py.html import transformations # some global variables for positions and orientations pos = [0.0,0.0,0.0] orient = [1.0,0.0,0.0,0.0] pose = [0.0,0.0,0.0] ############################################################################################################# # Localization server - A sample server for testing purpose def localization_server(ip,port): context = zmq.Context() socket = context.socket(zmq.REP) socket.bind(("tcp://*:%d" % port)) #dummy = { 'pos' : {'x':'1000','y':'1000','z':'1000'}, 'orient': {'w':'1','x':'0','y':'0','z':'0'}}; dummy = { 'pose' : {'x':'1','y':'1','alpha':'0'}}; while True: # Wait for next request from client message = socket.recv() #print("Received request: %s" % message) # Do some 'work' time.sleep(0.050) # Send reply back to client socket.send(json.dumps(dummy)) print "quitting ... " ############################################################################################################# # Localization client def localization_client(lock,ip,port): context = zmq.Context() # Socket to talk to server print "Connecting to localization server ... " socket = context.socket(zmq.REQ) socket.connect(("tcp://localhost:%d" % port)) # Do 10 requests, waiting each time for a response while True: # print("Sending request %s ... " % request) socket.send(b"pose") # Get the reply. str = socket.recv(1024) print str result = json.loads(str) # printing the result # print(result) # In order to access position : (result["pos"]["x"],result["pos"]["y"],result["pos"]["z"]) # In order to access orientation : (result["orient"]["w"],result["orient"]["x"],result["orient"]["y"],result["orient"]["z"]) lock.acquire() #global pos, orient #pos = [float(result["pos"]["x"]),float(result["pos"]["y"]),float(result["pos"]["z"])] #orient = [float(result["orient"]["w"]),float(result["orient"]["x"]),float(result["orient"]["y"]),float(result["orient"]["z"])] global pose #pose = [float(result["pose"]["x"]),float(result["pose"]["y"]),float(result["pose"]["alpha"])] pose = [float(result["pos"]["x"]),float(result["pos"]["y"]),float(result["orient"]["z"])] lock.release() #print "Position : " , pos #print "Orientation : " , orient # wait for a while time.sleep(0.050) ############################################################################################################# # Plotting function def plot_robot_pose(interval, lock): # Creating a figure F = plt.figure(1,(10,10)) # Get an instance to axis ax = plt.gca() # clear things for fresh plot ax.cla() # change default range - approximate localization range ax.set_xlim((-0.5,5.5)) ax.set_ylim((-3,3)) # set the titles ax.set_title('Localization on XZ Plane') ax.set_xlabel('Z Axis (m)') ax.set_ylabel('X Axis (m)') # get a handle to the figure fig = plt.gcf() angle = 0 wedge1= patches.Wedge((3,0),0.5,angle-15,angle+15) # Kinect Camera points = [[0, 0.3], [0, -0.3], [-0.3, 0]] polygon = plt.Polygon(points) fig.gca().add_artist(polygon) # Camera origin camera_origin=plt.Circle((0,0),0.05,color='r') fig.gca().add_artist(camera_origin) # Camera Text annotation ax.text(-0.4, -0.5, 'KINECT', fontsize=18) # Draw stuff plt.draw() # enable grid plt.grid() def init(): wedge1.center = (3, 0) ax.add_patch(wedge1) return wedge1, def animate(i): lock.acquire() local_pos = pose[0:2]; local_orient = pose[2] lock.release() #local_pos = [x/1000 for x in local_pos] #euler = transformations.euler_from_quaternion(local_orient,axes='syxz') #euler = [math.degrees(x) for x in euler] #deg = euler[1] deg = math.degrees(local_orient) #x,y = wedge1.center x = local_pos[0] y = local_pos[1] wedge1.set_center((x,y)) wedge1.set_theta1(deg - 15) wedge1.set_theta2(deg + 15) #print "Hello : (%d,%d);(%d,%d)" % (x,y,theta1,theta2) print "Position : " , local_pos print "Orientation : " , local_orient print "Euler : " , deg return wedge1, an1 = animation.FuncAnimation(fig,animate,init_func=init,interval=interval) plt.show() ############################################################################################################# # Main function if __name__ == "__main__": # port port = 5700 # ip address ip = "localhost" try: # create a lock object for synchronization lock = thread.allocate_lock() # Starting the plotting thread interval = 500; thread.start_new_thread(plot_robot_pose,(interval,lock)); # Responder (server) - uncomment to check with the local server #thread.start_new_thread(localization_server,(ip,port)); # Requester (client) thread.start_new_thread(localization_client,(lock,ip,port)); except: # printing on exception print "Exception occured : ", sys.exc_info() # do nothing in the main thread while 1: pass
en
0.372595
# Standard libraries # Additional dependencis # ZeroMQ Python bindings - http://zeromq.org/bindings:python # matplot library - http://matplotlib.org/ # transformations.py - http://www.lfd.uci.edu/~gohlke/code/transformations.py.html # some global variables for positions and orientations ############################################################################################################# # Localization server - A sample server for testing purpose #dummy = { 'pos' : {'x':'1000','y':'1000','z':'1000'}, 'orient': {'w':'1','x':'0','y':'0','z':'0'}}; # Wait for next request from client #print("Received request: %s" % message) # Do some 'work' # Send reply back to client ############################################################################################################# # Localization client # Socket to talk to server # Do 10 requests, waiting each time for a response # print("Sending request %s ... " % request) # Get the reply. # printing the result # print(result) # In order to access position : (result["pos"]["x"],result["pos"]["y"],result["pos"]["z"]) # In order to access orientation : (result["orient"]["w"],result["orient"]["x"],result["orient"]["y"],result["orient"]["z"]) #global pos, orient #pos = [float(result["pos"]["x"]),float(result["pos"]["y"]),float(result["pos"]["z"])] #orient = [float(result["orient"]["w"]),float(result["orient"]["x"]),float(result["orient"]["y"]),float(result["orient"]["z"])] #pose = [float(result["pose"]["x"]),float(result["pose"]["y"]),float(result["pose"]["alpha"])] #print "Position : " , pos #print "Orientation : " , orient # wait for a while ############################################################################################################# # Plotting function # Creating a figure # Get an instance to axis # clear things for fresh plot # change default range - approximate localization range # set the titles # get a handle to the figure # Kinect Camera # Camera origin # Camera Text annotation # Draw stuff # enable grid #local_pos = [x/1000 for x in local_pos] #euler = transformations.euler_from_quaternion(local_orient,axes='syxz') #euler = [math.degrees(x) for x in euler] #deg = euler[1] #x,y = wedge1.center #print "Hello : (%d,%d);(%d,%d)" % (x,y,theta1,theta2) ############################################################################################################# # Main function # port # ip address # create a lock object for synchronization # Starting the plotting thread # Responder (server) - uncomment to check with the local server #thread.start_new_thread(localization_server,(ip,port)); # Requester (client) # printing on exception # do nothing in the main thread
2.209513
2
scripts/keeper/harvest.py
shuklaayush/badger-system
99
6617637
from brownie import * from web3.contract import estimate_gas_for_function from config.keeper import keeper_config from helpers.gas_utils import gas_strategies from helpers.registry import registry from helpers.sett.SnapshotManager import SnapshotManager from helpers.utils import tx_wait, val from helpers.console_utils import console from scripts.systems.badger_system import BadgerSystem, connect_badger from tabulate import tabulate gas_strategies.set_default_for_active_chain() def harvest_all(badger: BadgerSystem, skip, min_profit=0): """ Runs harvest function for strategies if they are expected to be profitable. If a profit estimate fails for any reason the default behavior is to treat it as having a profit of zero. :param badger: badger system :param skip: strategies to skip checking :param min_profit: minimum estimated profit (in ETH or BNB) required for harvest to be executed on chain """ for key, vault in badger.sett_system.vaults.items(): if key in skip: continue console.print( "\n[bold yellow]===== Harvest: " + str(key) + " =====[/bold yellow]\n" ) print("Harvest: " + key) snap = SnapshotManager(badger, key) strategy = badger.getStrategy(key) before = snap.snap() if strategy.keeper() == badger.badgerRewardsManager: keeper = accounts.at(strategy.keeper()) estimated_profit = snap.estimateProfitHarvestViaManager( key, strategy, {"from": keeper, "gas_limit": 2000000, "allow_revert": True}, min_profit, ) if estimated_profit >= min_profit: snap.settHarvestViaManager( strategy, {"from": keeper, "gas_limit": 2000000, "allow_revert": True}, confirm=False, ) else: estimated_profit = snap.estimateProfitHarvest( key, {"from": keeper, "gas_limit": 2000000, "allow_revert": True}, min_profit, ) if estimated_profit >= min_profit: snap.settHarvest( {"from": keeper, "gas_limit": 2000000, "allow_revert": True}, confirm=False, ) tx_wait() if rpc.is_active(): chain.mine() after = snap.snap() snap.printCompare(before, after) def main(): badger = connect_badger(load_keeper=True, load_harvester=True) skip = keeper_config.get_active_chain_skipped_setts("harvest") if rpc.is_active(): """ Test: Load up testing accounts with ETH """ accounts[0].transfer(badger.deployer, Wei("5 ether")) accounts[0].transfer(badger.keeper, Wei("5 ether")) accounts[0].transfer(badger.guardian, Wei("5 ether")) skip.append("native.test") skip.append("yearn.wbtc") skip.append("experimental.sushiIBbtcWbtc") skip.append("experimental.digg") harvest_all(badger, skip)
from brownie import * from web3.contract import estimate_gas_for_function from config.keeper import keeper_config from helpers.gas_utils import gas_strategies from helpers.registry import registry from helpers.sett.SnapshotManager import SnapshotManager from helpers.utils import tx_wait, val from helpers.console_utils import console from scripts.systems.badger_system import BadgerSystem, connect_badger from tabulate import tabulate gas_strategies.set_default_for_active_chain() def harvest_all(badger: BadgerSystem, skip, min_profit=0): """ Runs harvest function for strategies if they are expected to be profitable. If a profit estimate fails for any reason the default behavior is to treat it as having a profit of zero. :param badger: badger system :param skip: strategies to skip checking :param min_profit: minimum estimated profit (in ETH or BNB) required for harvest to be executed on chain """ for key, vault in badger.sett_system.vaults.items(): if key in skip: continue console.print( "\n[bold yellow]===== Harvest: " + str(key) + " =====[/bold yellow]\n" ) print("Harvest: " + key) snap = SnapshotManager(badger, key) strategy = badger.getStrategy(key) before = snap.snap() if strategy.keeper() == badger.badgerRewardsManager: keeper = accounts.at(strategy.keeper()) estimated_profit = snap.estimateProfitHarvestViaManager( key, strategy, {"from": keeper, "gas_limit": 2000000, "allow_revert": True}, min_profit, ) if estimated_profit >= min_profit: snap.settHarvestViaManager( strategy, {"from": keeper, "gas_limit": 2000000, "allow_revert": True}, confirm=False, ) else: estimated_profit = snap.estimateProfitHarvest( key, {"from": keeper, "gas_limit": 2000000, "allow_revert": True}, min_profit, ) if estimated_profit >= min_profit: snap.settHarvest( {"from": keeper, "gas_limit": 2000000, "allow_revert": True}, confirm=False, ) tx_wait() if rpc.is_active(): chain.mine() after = snap.snap() snap.printCompare(before, after) def main(): badger = connect_badger(load_keeper=True, load_harvester=True) skip = keeper_config.get_active_chain_skipped_setts("harvest") if rpc.is_active(): """ Test: Load up testing accounts with ETH """ accounts[0].transfer(badger.deployer, Wei("5 ether")) accounts[0].transfer(badger.keeper, Wei("5 ether")) accounts[0].transfer(badger.guardian, Wei("5 ether")) skip.append("native.test") skip.append("yearn.wbtc") skip.append("experimental.sushiIBbtcWbtc") skip.append("experimental.digg") harvest_all(badger, skip)
en
0.837381
Runs harvest function for strategies if they are expected to be profitable. If a profit estimate fails for any reason the default behavior is to treat it as having a profit of zero. :param badger: badger system :param skip: strategies to skip checking :param min_profit: minimum estimated profit (in ETH or BNB) required for harvest to be executed on chain Test: Load up testing accounts with ETH
2.028771
2
Modules and packages/Packages/classes/calculator.py
kislyakovm/introduction-to-python
5
6617638
""" This module contains calculator classes """ class Add: def __init__(self): self.current = 0 def add(self, amount): self.current += amount def get_current(self): return self.current class Subtract: def __init__(self): self.current = 100 def subtract(self, amount): self.current -= amount def get_current(self): return self.current class Multiply: def __init__(self): self.current = 10 def multiply(self, amount): self.current *= amount def get_current(self): return self.current
""" This module contains calculator classes """ class Add: def __init__(self): self.current = 0 def add(self, amount): self.current += amount def get_current(self): return self.current class Subtract: def __init__(self): self.current = 100 def subtract(self, amount): self.current -= amount def get_current(self): return self.current class Multiply: def __init__(self): self.current = 10 def multiply(self, amount): self.current *= amount def get_current(self): return self.current
en
0.536907
This module contains calculator classes
3.756353
4
legal_radar/services/semantic_search_evaluation.py
meaningfy-ws/legal-radar
0
6617639
<filename>legal_radar/services/semantic_search_evaluation.py<gh_stars>0 import pickle from legal_radar import config from legal_radar.services.feature_selector import reduce_array_column from legal_radar.services.store_registry import store_registry from legal_radar.services.split_documents_pipeline import DOCUMENT_ID_SOURCE import pandas as pd from mfy_nlp_core.adapters.abstract_model import SentenceEmbeddingModelABC import faiss import numpy as np DATES_DOCUMENT = 'dates_document' HTML_LINKS = 'htmls_to_download' TEXT_PIECE = 'text_piece' SAMPLE_QUESTIONS_COLUMN = 'Questions/Text Extracts' SAMPLE_CELEX_NUMBER = 'Celex No' class SemanticSearchEvaluation: def __init__(self, documents_es_index_name: str, splitted_documents_es_index_name: str, faiss_bucket_name: str, faiss_index_path: str, embedding_model: SentenceEmbeddingModelABC, sample_questions_csv_path: str, sample_questions_csv_sep: str, ): self.documents_es_index_name = documents_es_index_name self.splitted_documents_es_index_name = splitted_documents_es_index_name self.faiss_bucket_name = faiss_bucket_name self.faiss_index_path = faiss_index_path self.embedding_model = embedding_model self.sample_questions_csv_path = sample_questions_csv_path self.sample_questions_csv_sep = sample_questions_csv_sep self.documents = None self.splitted_documents = None self.faiss_index = None self.sample_questions = None def load_documents(self): """Read the data from ES.""" es_store = store_registry.es_index_store() df = es_store.get_dataframe(index_name=self.documents_es_index_name) df[DATES_DOCUMENT] = pd.to_datetime(df[DATES_DOCUMENT]).dt.date self.documents = df def load_splitted_documents(self): """Read the data from ES.""" es_store = store_registry.es_index_store() self.splitted_documents = es_store.get_dataframe( index_name=self.splitted_documents_es_index_name) def load_faiss_index(self): """Load and deserialize the Faiss index.""" minio_store = store_registry.minio_object_store( minio_bucket=self.faiss_bucket_name) data = pickle.loads(minio_store.get_object( object_name=self.faiss_index_path)) self.faiss_index = faiss.deserialize_index(data) def load_sample_questions(self): self.sample_questions = pd.read_csv( self.sample_questions_csv_path, sep=self.sample_questions_csv_sep) self.sample_questions = self.sample_questions[self.sample_questions[SAMPLE_QUESTIONS_COLUMN].notnull( )] def semantic_search(self, user_input: str): num_results = 100 embeddings = self.embedding_model.encode(sentences=[user_input]) D, I = self.faiss_index.search( np.array(embeddings).astype("float32"), k=num_results) document_parts = pd.DataFrame( self.splitted_documents.iloc[I.flatten().tolist()]) document_parts['similarity'] = pd.Series(D.flatten().tolist()).values document_parts = document_parts.drop_duplicates( DOCUMENT_ID_SOURCE).reset_index(drop=True) documents_id = document_parts[DOCUMENT_ID_SOURCE].values result_documents = pd.DataFrame( self.documents.loc[documents_id]).reset_index(drop=True) result_documents['similarity'] = document_parts['similarity'] result_documents['text_piece'] = document_parts['text_piece'] return result_documents def find_part_in_search_result(self, result_set: pd.DataFrame, reference_dataset_celex_number: str, result_set_celex_number: str = 'celex_numbers') -> tuple: """Finds the position and the similarity of the documents parts from the result set Args: result_set (pd.DataFrame): the result dataset from semantic search execution reference_dataset_celex_number (str): celex numbers from test bed dataset result_set_celex_number (str, optional): [description]. Defaults to 'celex_numbers'. Returns: tuple: the position and the similarity from document part result set """ reduced_array_dataset = reduce_array_column( result_set, result_set_celex_number).reset_index(drop=True) index = reduced_array_dataset[ reduced_array_dataset[result_set_celex_number].isin([reference_dataset_celex_number])].index.to_list() position = reduced_array_dataset['text_piece'].loc[index].index.to_list( ) similarity = reduced_array_dataset['similarity'].apply( lambda x: 1 / (1 + x)).loc[index].to_list() return position, similarity def find_document_in_search_result(self, result_set: pd.DataFrame, reference_dataset_celex_number: str, result_set_celex_number: str = 'celex_numbers') -> list: """Finds the position and the similarity of the documents from the result set Args: result_set (pd.DataFrame): the result dataset from semantic search execution reference_dataset_celex_number (str): celex numbers from test bed dataset result_set_celex_number (str, optional): [description]. Defaults to 'celex_numbers'. Returns: list: the position of the document from result set """ reduced_array_dataset = reduce_array_column( result_set, result_set_celex_number).reset_index(drop=True) index = reduced_array_dataset[ reduced_array_dataset[result_set_celex_number].isin([reference_dataset_celex_number])].index.to_list() position = reduced_array_dataset['title'].loc[index].index.to_list() return position def evaluate_parts(self, test_bed: pd.DataFrame) -> list: """Executes each input query from the test bed dataset into semantic search and grabs the position and the similarity of the documents and documents' part of the result set. Args: test_bed (pd.DataFrame): test dataset with the input queries and comparable celex number Returns: list: the position and the similarity of the documents and documents' part """ result = [] for index, row in test_bed.iterrows(): result_set = self.semantic_search(row[SAMPLE_QUESTIONS_COLUMN]) position_p, similarity = self.find_part_in_search_result( result_set, row[SAMPLE_CELEX_NUMBER]) position_d = self.find_document_in_search_result( result_set, row[SAMPLE_CELEX_NUMBER]) result.append({ 'position_part': position_p, 'position_document': position_d, 'similarity': similarity }) return result def merge_test_bed_with_result_set(self, test_bed: pd.DataFrame, result_set: list) -> pd.DataFrame: """Merge the test bed dataframe and the result set list into a single dataframe Args: test_bed (pd.DataFrame): test dataset with the input queries and comparable celex number result_set (list): the result from evaluation part Returns: pd.DataFrame: merged dataframe from test bed and evaluation part """ result = pd.DataFrame(result_set) result = result.assign(in_top_5_slices=result['position_part'].apply(lambda x: any(np.array(x) <= 5)), in_top_10_slices=result['position_part'].apply( lambda x: any(np.array(x) <= 10)), in_top_5_documents=result['position_document'].apply( lambda x: any(np.array(x) <= 5)), in_top_10_documents=result['position_document'].apply( lambda x: any(np.array(x) <= 10)), in_q3=result['similarity'].apply(lambda x: any(np.array(x) >= 0.75))) return pd.merge(test_bed, result, on=test_bed.index, how="inner") def evaluate(self) -> pd.DataFrame: self.load_documents() self.load_splitted_documents() self.load_faiss_index() self.load_sample_questions() evaluation = self.evaluate_parts(self.sample_questions) result = self.merge_test_bed_with_result_set( self.sample_questions, evaluation) return result
<filename>legal_radar/services/semantic_search_evaluation.py<gh_stars>0 import pickle from legal_radar import config from legal_radar.services.feature_selector import reduce_array_column from legal_radar.services.store_registry import store_registry from legal_radar.services.split_documents_pipeline import DOCUMENT_ID_SOURCE import pandas as pd from mfy_nlp_core.adapters.abstract_model import SentenceEmbeddingModelABC import faiss import numpy as np DATES_DOCUMENT = 'dates_document' HTML_LINKS = 'htmls_to_download' TEXT_PIECE = 'text_piece' SAMPLE_QUESTIONS_COLUMN = 'Questions/Text Extracts' SAMPLE_CELEX_NUMBER = 'Celex No' class SemanticSearchEvaluation: def __init__(self, documents_es_index_name: str, splitted_documents_es_index_name: str, faiss_bucket_name: str, faiss_index_path: str, embedding_model: SentenceEmbeddingModelABC, sample_questions_csv_path: str, sample_questions_csv_sep: str, ): self.documents_es_index_name = documents_es_index_name self.splitted_documents_es_index_name = splitted_documents_es_index_name self.faiss_bucket_name = faiss_bucket_name self.faiss_index_path = faiss_index_path self.embedding_model = embedding_model self.sample_questions_csv_path = sample_questions_csv_path self.sample_questions_csv_sep = sample_questions_csv_sep self.documents = None self.splitted_documents = None self.faiss_index = None self.sample_questions = None def load_documents(self): """Read the data from ES.""" es_store = store_registry.es_index_store() df = es_store.get_dataframe(index_name=self.documents_es_index_name) df[DATES_DOCUMENT] = pd.to_datetime(df[DATES_DOCUMENT]).dt.date self.documents = df def load_splitted_documents(self): """Read the data from ES.""" es_store = store_registry.es_index_store() self.splitted_documents = es_store.get_dataframe( index_name=self.splitted_documents_es_index_name) def load_faiss_index(self): """Load and deserialize the Faiss index.""" minio_store = store_registry.minio_object_store( minio_bucket=self.faiss_bucket_name) data = pickle.loads(minio_store.get_object( object_name=self.faiss_index_path)) self.faiss_index = faiss.deserialize_index(data) def load_sample_questions(self): self.sample_questions = pd.read_csv( self.sample_questions_csv_path, sep=self.sample_questions_csv_sep) self.sample_questions = self.sample_questions[self.sample_questions[SAMPLE_QUESTIONS_COLUMN].notnull( )] def semantic_search(self, user_input: str): num_results = 100 embeddings = self.embedding_model.encode(sentences=[user_input]) D, I = self.faiss_index.search( np.array(embeddings).astype("float32"), k=num_results) document_parts = pd.DataFrame( self.splitted_documents.iloc[I.flatten().tolist()]) document_parts['similarity'] = pd.Series(D.flatten().tolist()).values document_parts = document_parts.drop_duplicates( DOCUMENT_ID_SOURCE).reset_index(drop=True) documents_id = document_parts[DOCUMENT_ID_SOURCE].values result_documents = pd.DataFrame( self.documents.loc[documents_id]).reset_index(drop=True) result_documents['similarity'] = document_parts['similarity'] result_documents['text_piece'] = document_parts['text_piece'] return result_documents def find_part_in_search_result(self, result_set: pd.DataFrame, reference_dataset_celex_number: str, result_set_celex_number: str = 'celex_numbers') -> tuple: """Finds the position and the similarity of the documents parts from the result set Args: result_set (pd.DataFrame): the result dataset from semantic search execution reference_dataset_celex_number (str): celex numbers from test bed dataset result_set_celex_number (str, optional): [description]. Defaults to 'celex_numbers'. Returns: tuple: the position and the similarity from document part result set """ reduced_array_dataset = reduce_array_column( result_set, result_set_celex_number).reset_index(drop=True) index = reduced_array_dataset[ reduced_array_dataset[result_set_celex_number].isin([reference_dataset_celex_number])].index.to_list() position = reduced_array_dataset['text_piece'].loc[index].index.to_list( ) similarity = reduced_array_dataset['similarity'].apply( lambda x: 1 / (1 + x)).loc[index].to_list() return position, similarity def find_document_in_search_result(self, result_set: pd.DataFrame, reference_dataset_celex_number: str, result_set_celex_number: str = 'celex_numbers') -> list: """Finds the position and the similarity of the documents from the result set Args: result_set (pd.DataFrame): the result dataset from semantic search execution reference_dataset_celex_number (str): celex numbers from test bed dataset result_set_celex_number (str, optional): [description]. Defaults to 'celex_numbers'. Returns: list: the position of the document from result set """ reduced_array_dataset = reduce_array_column( result_set, result_set_celex_number).reset_index(drop=True) index = reduced_array_dataset[ reduced_array_dataset[result_set_celex_number].isin([reference_dataset_celex_number])].index.to_list() position = reduced_array_dataset['title'].loc[index].index.to_list() return position def evaluate_parts(self, test_bed: pd.DataFrame) -> list: """Executes each input query from the test bed dataset into semantic search and grabs the position and the similarity of the documents and documents' part of the result set. Args: test_bed (pd.DataFrame): test dataset with the input queries and comparable celex number Returns: list: the position and the similarity of the documents and documents' part """ result = [] for index, row in test_bed.iterrows(): result_set = self.semantic_search(row[SAMPLE_QUESTIONS_COLUMN]) position_p, similarity = self.find_part_in_search_result( result_set, row[SAMPLE_CELEX_NUMBER]) position_d = self.find_document_in_search_result( result_set, row[SAMPLE_CELEX_NUMBER]) result.append({ 'position_part': position_p, 'position_document': position_d, 'similarity': similarity }) return result def merge_test_bed_with_result_set(self, test_bed: pd.DataFrame, result_set: list) -> pd.DataFrame: """Merge the test bed dataframe and the result set list into a single dataframe Args: test_bed (pd.DataFrame): test dataset with the input queries and comparable celex number result_set (list): the result from evaluation part Returns: pd.DataFrame: merged dataframe from test bed and evaluation part """ result = pd.DataFrame(result_set) result = result.assign(in_top_5_slices=result['position_part'].apply(lambda x: any(np.array(x) <= 5)), in_top_10_slices=result['position_part'].apply( lambda x: any(np.array(x) <= 10)), in_top_5_documents=result['position_document'].apply( lambda x: any(np.array(x) <= 5)), in_top_10_documents=result['position_document'].apply( lambda x: any(np.array(x) <= 10)), in_q3=result['similarity'].apply(lambda x: any(np.array(x) >= 0.75))) return pd.merge(test_bed, result, on=test_bed.index, how="inner") def evaluate(self) -> pd.DataFrame: self.load_documents() self.load_splitted_documents() self.load_faiss_index() self.load_sample_questions() evaluation = self.evaluate_parts(self.sample_questions) result = self.merge_test_bed_with_result_set( self.sample_questions, evaluation) return result
en
0.650021
Read the data from ES. Read the data from ES. Load and deserialize the Faiss index. Finds the position and the similarity of the documents parts from the result set Args: result_set (pd.DataFrame): the result dataset from semantic search execution reference_dataset_celex_number (str): celex numbers from test bed dataset result_set_celex_number (str, optional): [description]. Defaults to 'celex_numbers'. Returns: tuple: the position and the similarity from document part result set Finds the position and the similarity of the documents from the result set Args: result_set (pd.DataFrame): the result dataset from semantic search execution reference_dataset_celex_number (str): celex numbers from test bed dataset result_set_celex_number (str, optional): [description]. Defaults to 'celex_numbers'. Returns: list: the position of the document from result set Executes each input query from the test bed dataset into semantic search and grabs the position and the similarity of the documents and documents' part of the result set. Args: test_bed (pd.DataFrame): test dataset with the input queries and comparable celex number Returns: list: the position and the similarity of the documents and documents' part Merge the test bed dataframe and the result set list into a single dataframe Args: test_bed (pd.DataFrame): test dataset with the input queries and comparable celex number result_set (list): the result from evaluation part Returns: pd.DataFrame: merged dataframe from test bed and evaluation part
2.198831
2
usaspending_api/search/v2/views/spending_by_category_views/spending_by_agency_types.py
Violet26/usaspending-api
0
6617640
<filename>usaspending_api/search/v2/views/spending_by_category_views/spending_by_agency_types.py from usaspending_api.search.v2.views.spending_by_category_views.base_spending_by_agency import ( AgencyType, BaseAgencyViewSet, ) from usaspending_api.search.v2.views.spending_by_category_views.base_spending_by_category import Category class AwardingAgencyViewSet(BaseAgencyViewSet): """ This route takes award filters, and returns spending by awarding agencies. """ endpoint_doc = "usaspending_api/api_contracts/contracts/v2/search/spending_by_category/awarding_agency.md" agency_type = AgencyType.AWARDING_TOPTIER category = Category(name="awarding_agency", primary_field="awarding_toptier_agency_name.keyword") class AwardingSubagencyViewSet(BaseAgencyViewSet): """ This route takes award filters, and returns spending by awarding subagencies. """ endpoint_doc = "usaspending_api/api_contracts/contracts/v2/search/spending_by_category/awarding_subagency.md" agency_type = AgencyType.AWARDING_SUBTIER category = Category(name="awarding_subagency", primary_field="awarding_subtier_agency_name.keyword") class FundingAgencyViewSet(BaseAgencyViewSet): """ This route takes award filters, and returns spending by funding agencies. """ endpoint_doc = "usaspending_api/api_contracts/contracts/v2/search/spending_by_category/funding_agency.md" agency_type = AgencyType.FUNDING_TOPTIER category = Category(name="funding_agency", primary_field="funding_toptier_agency_name.keyword") class FundingSubagencyViewSet(BaseAgencyViewSet): """ This route takes award filters, and returns spending by funding subagencies. """ endpoint_doc = "usaspending_api/api_contracts/contracts/v2/search/spending_by_category/funding_subagency.md" agency_type = AgencyType.FUNDING_SUBTIER category = Category(name="funding_subagency", primary_field="funding_subtier_agency_name.keyword")
<filename>usaspending_api/search/v2/views/spending_by_category_views/spending_by_agency_types.py from usaspending_api.search.v2.views.spending_by_category_views.base_spending_by_agency import ( AgencyType, BaseAgencyViewSet, ) from usaspending_api.search.v2.views.spending_by_category_views.base_spending_by_category import Category class AwardingAgencyViewSet(BaseAgencyViewSet): """ This route takes award filters, and returns spending by awarding agencies. """ endpoint_doc = "usaspending_api/api_contracts/contracts/v2/search/spending_by_category/awarding_agency.md" agency_type = AgencyType.AWARDING_TOPTIER category = Category(name="awarding_agency", primary_field="awarding_toptier_agency_name.keyword") class AwardingSubagencyViewSet(BaseAgencyViewSet): """ This route takes award filters, and returns spending by awarding subagencies. """ endpoint_doc = "usaspending_api/api_contracts/contracts/v2/search/spending_by_category/awarding_subagency.md" agency_type = AgencyType.AWARDING_SUBTIER category = Category(name="awarding_subagency", primary_field="awarding_subtier_agency_name.keyword") class FundingAgencyViewSet(BaseAgencyViewSet): """ This route takes award filters, and returns spending by funding agencies. """ endpoint_doc = "usaspending_api/api_contracts/contracts/v2/search/spending_by_category/funding_agency.md" agency_type = AgencyType.FUNDING_TOPTIER category = Category(name="funding_agency", primary_field="funding_toptier_agency_name.keyword") class FundingSubagencyViewSet(BaseAgencyViewSet): """ This route takes award filters, and returns spending by funding subagencies. """ endpoint_doc = "usaspending_api/api_contracts/contracts/v2/search/spending_by_category/funding_subagency.md" agency_type = AgencyType.FUNDING_SUBTIER category = Category(name="funding_subagency", primary_field="funding_subtier_agency_name.keyword")
en
0.907878
This route takes award filters, and returns spending by awarding agencies. This route takes award filters, and returns spending by awarding subagencies. This route takes award filters, and returns spending by funding agencies. This route takes award filters, and returns spending by funding subagencies.
1.949846
2
Biblio/BiblioAD.py
XimeHut/SimulacionCatalogos
0
6617641
class BiblioAD: def capturar(this,datos): # 1. Abrir el archivo archivo = open("Libros.txt","a") # 2. Escribir, guardar o almacenar los datos en el archivo archivo.write(datos+"\n") # 3. Cerrar el archivo archivo.close() return "Datos a capturar: "+datos def consultaGeneral(this): datos="" libro="" try: # 1. Abrir el archivo archivo = open("Libros.txt","r") # 2. Procesar los datos del archivo libro = archivo.readline() while(libro != ""): datos = datos+libro libro = archivo.readline() # 3. Cerrar el archivo archivo.close() datos = "CONSULTA GENERAL:\n"+datos except: datos="ERROR" return datos def consultarEditorial(this, edit): datos="" libro="" encontrado=False try: # 1. Abrir el archivo archivo = open("Libros.txt","r") # 2. Procesar los datos del archivo edit = edit+"\n" libro = archivo.readline() while(libro != ""): st = libro.split(".") second = st[2] #edit = edit+"\n" if(edit == st[2]): datos = datos + libro encontrado = True libro = archivo.readline() # 3. Cerrar el archivo archivo.close() if(not encontrado): datos = "ERROR" except: datos="ERROR ABRIENDO EL ARCHIVO" return datos def consultarTitulo(this, tit): datos="" libro="" encontrado=False try: # 1. Abrir el archivo archivo = open("Libros.txt","r") # 2. Procesar los datos del archivo libro = archivo.readline() while(libro != ""): st = libro.split("_") if(tit == st[0]): datos = datos + libro encontrado = True libro = archivo.readline() # 3. Cerrar el archivo archivo.close() if(not encontrado): datos = "ERROR" except: datos="ERROR ABRIENDO EL ARCHIVO" return datos def consultarAutor(this, aut): datos="" libro="" encontrado=False try: # 1. Abrir el archivo archivo = open("Libros.txt","r") # 2. Procesar los datos del archivo libro = archivo.readline() while(libro != ""): st = libro.split("_") if(aut == st[1]): datos = datos + libro encontrado = True libro = archivo.readline() # 3. Cerrar el archivo archivo.close() if(not encontrado): datos = "ERROR" except: datos="ERROR ABRIENDO EL ARCHIVO" return datos
class BiblioAD: def capturar(this,datos): # 1. Abrir el archivo archivo = open("Libros.txt","a") # 2. Escribir, guardar o almacenar los datos en el archivo archivo.write(datos+"\n") # 3. Cerrar el archivo archivo.close() return "Datos a capturar: "+datos def consultaGeneral(this): datos="" libro="" try: # 1. Abrir el archivo archivo = open("Libros.txt","r") # 2. Procesar los datos del archivo libro = archivo.readline() while(libro != ""): datos = datos+libro libro = archivo.readline() # 3. Cerrar el archivo archivo.close() datos = "CONSULTA GENERAL:\n"+datos except: datos="ERROR" return datos def consultarEditorial(this, edit): datos="" libro="" encontrado=False try: # 1. Abrir el archivo archivo = open("Libros.txt","r") # 2. Procesar los datos del archivo edit = edit+"\n" libro = archivo.readline() while(libro != ""): st = libro.split(".") second = st[2] #edit = edit+"\n" if(edit == st[2]): datos = datos + libro encontrado = True libro = archivo.readline() # 3. Cerrar el archivo archivo.close() if(not encontrado): datos = "ERROR" except: datos="ERROR ABRIENDO EL ARCHIVO" return datos def consultarTitulo(this, tit): datos="" libro="" encontrado=False try: # 1. Abrir el archivo archivo = open("Libros.txt","r") # 2. Procesar los datos del archivo libro = archivo.readline() while(libro != ""): st = libro.split("_") if(tit == st[0]): datos = datos + libro encontrado = True libro = archivo.readline() # 3. Cerrar el archivo archivo.close() if(not encontrado): datos = "ERROR" except: datos="ERROR ABRIENDO EL ARCHIVO" return datos def consultarAutor(this, aut): datos="" libro="" encontrado=False try: # 1. Abrir el archivo archivo = open("Libros.txt","r") # 2. Procesar los datos del archivo libro = archivo.readline() while(libro != ""): st = libro.split("_") if(aut == st[1]): datos = datos + libro encontrado = True libro = archivo.readline() # 3. Cerrar el archivo archivo.close() if(not encontrado): datos = "ERROR" except: datos="ERROR ABRIENDO EL ARCHIVO" return datos
es
0.999803
# 1. Abrir el archivo # 2. Escribir, guardar o almacenar los datos en el archivo # 3. Cerrar el archivo # 1. Abrir el archivo # 2. Procesar los datos del archivo # 3. Cerrar el archivo # 1. Abrir el archivo # 2. Procesar los datos del archivo #edit = edit+"\n" # 3. Cerrar el archivo # 1. Abrir el archivo # 2. Procesar los datos del archivo # 3. Cerrar el archivo # 1. Abrir el archivo # 2. Procesar los datos del archivo # 3. Cerrar el archivo
3.304209
3
fastly/_version.py
cbsiamlg/fastly-py
0
6617642
<reponame>cbsiamlg/fastly-py<gh_stars>0 __version__ = '0.4.0+cbsiamlg'
__version__ = '0.4.0+cbsiamlg'
none
1
1.074023
1
AddTwoNummberInBase11.py
noob-coder-478/Hacktoberfest-21
0
6617643
# Function to get value of a numeral # For example it returns 10 for input 'A' # 1 for '1', etc def getNumeralValue(num) : if( num >= '0' and num <= '9') : return ord(num) - ord('0') if( num >= 'A' and num <= 'D') : return ord(num ) - ord('A') + 10 # Function to get numeral for a value. # For example it returns 'A' for input 10 # '1' for 1, etc def getNumeral(val): if( val >= 0 and val <= 9): return chr(val + ord('0')) if( val >= 10 and val <= 14) : return chr(val + ord('A') - 10) # Function to add two numbers in base 14 def sumBase14(num1, num2): l1 = len(num1) l2 = len(num2) carry = 0 if(l1 != l2) : print("Function doesn't support numbers of different" " lengths. If you want to add such numbers then" " prefix smaller number with required no. of zeroes") # Note the size of the allocated memory is one # more than i/p lengths for the cases where we # have carry at the last like adding D1 and A1 res = [0]*(l1 + 1) # dd all numerals from right to left for i in range(l1 - 1, -1, -1): # Get decimal values of the numerals of # i/p numbers nml1 = getNumeralValue(num1[i]) nml2 = getNumeralValue(num2[i]) # Add decimal values of numerals and carry res_nml = carry + nml1 + nml2; # Check if we have carry for next addition # of numerals if(res_nml >= 14) : carry = 1 res_nml -= 14 else: carry = 0 res[i+1] = getNumeral(res_nml) # if there is no carry after last iteration # then result should not include 0th character # of the resultant string if(carry == 0): return (res + 1) # if we have carry after last iteration then # result should include 0th character res[0] = '1' return res # Driver code if __name__ == "__main__": num1 = "DC2" num2 = "0A3" print("Result is ",end="") res = sumBase14(num1, num2) for i in range(len(res)): print(res[i],end="") # This code is contributed by chitranayal
# Function to get value of a numeral # For example it returns 10 for input 'A' # 1 for '1', etc def getNumeralValue(num) : if( num >= '0' and num <= '9') : return ord(num) - ord('0') if( num >= 'A' and num <= 'D') : return ord(num ) - ord('A') + 10 # Function to get numeral for a value. # For example it returns 'A' for input 10 # '1' for 1, etc def getNumeral(val): if( val >= 0 and val <= 9): return chr(val + ord('0')) if( val >= 10 and val <= 14) : return chr(val + ord('A') - 10) # Function to add two numbers in base 14 def sumBase14(num1, num2): l1 = len(num1) l2 = len(num2) carry = 0 if(l1 != l2) : print("Function doesn't support numbers of different" " lengths. If you want to add such numbers then" " prefix smaller number with required no. of zeroes") # Note the size of the allocated memory is one # more than i/p lengths for the cases where we # have carry at the last like adding D1 and A1 res = [0]*(l1 + 1) # dd all numerals from right to left for i in range(l1 - 1, -1, -1): # Get decimal values of the numerals of # i/p numbers nml1 = getNumeralValue(num1[i]) nml2 = getNumeralValue(num2[i]) # Add decimal values of numerals and carry res_nml = carry + nml1 + nml2; # Check if we have carry for next addition # of numerals if(res_nml >= 14) : carry = 1 res_nml -= 14 else: carry = 0 res[i+1] = getNumeral(res_nml) # if there is no carry after last iteration # then result should not include 0th character # of the resultant string if(carry == 0): return (res + 1) # if we have carry after last iteration then # result should include 0th character res[0] = '1' return res # Driver code if __name__ == "__main__": num1 = "DC2" num2 = "0A3" print("Result is ",end="") res = sumBase14(num1, num2) for i in range(len(res)): print(res[i],end="") # This code is contributed by chitranayal
en
0.803315
# Function to get value of a numeral # For example it returns 10 for input 'A' # 1 for '1', etc # Function to get numeral for a value. # For example it returns 'A' for input 10 # '1' for 1, etc # Function to add two numbers in base 14 # Note the size of the allocated memory is one # more than i/p lengths for the cases where we # have carry at the last like adding D1 and A1 # dd all numerals from right to left # Get decimal values of the numerals of # i/p numbers # Add decimal values of numerals and carry # Check if we have carry for next addition # of numerals # if there is no carry after last iteration # then result should not include 0th character # of the resultant string # if we have carry after last iteration then # result should include 0th character # Driver code # This code is contributed by chitranayal
4.134063
4
hf/category/dispatcher.py
HappyFaceGoettingen/HappyFaceCore
0
6617644
# -*- coding: utf-8 -*- # # Copyright 2012 Institut für Experimentelle Kernphysik - Karlsruher Institut für Technologie # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cherrypy as cp import hf, datetime, time, logging, traceback, os from hf.module.database import hf_runs import hf.plotgenerator from sqlalchemy import * from mako.template import Template import json class CategoryCachingTool(cp._cptools.CachingTool): """ Extends the default caching tool to distinguish between category page requests with and without explicit time parameter set. If no explicit time is given (-> most recent run shall be displayed) it is checked if the cached page is older than the most recent run in the database. If that is the case, all variants of the current URI is removed from cache (it is not possible to remove *this* variant as far as I know). """ def _wrapper(self, **kwargs): params = cp.serving.request.params # dirty check if we want "the most current" page # When HappyFace is first started, there is no cp._cache variable. # It is created by the first call to cp.lib.caching.get() ! if hasattr(cp, "_cache") and ("time" not in params or "date" not in params): cached_data = cp._cache.get() if cached_data is None: super(CategoryCachingTool, self)._wrapper(**kwargs) return cached_run_date = datetime.datetime.fromtimestamp(cached_data[3]) hf_runs = hf.module.database.hf_runs newest_run_date = select([hf_runs.c.time], hf_runs.c.completed==True).order_by(hf_runs.c.time.desc()).execute().fetchone()[0] if cached_run_date < newest_run_date: cp._cache.delete() super(CategoryCachingTool, self)._wrapper(**kwargs) _wrapper.priority = 20 cp.tools.category_caching = CategoryCachingTool('before_handler', cp.lib.caching.get, 'category_caching') class Dispatcher(object): """ Show a page for displaying the contents of a category. """ def __init__(self, category_list): self.logger = logging.getLogger(self.__module__) self.category_list = category_list def prepareDisplay(self, category=None, **kwargs): """ generate the data and template context required to display a page containing the navbar. :param string: Name of the ca :returns: tuple (template_context, category_dict, run) """ ''' Select a hf run based on the passed 'date' and 'time' parameters. If not specified, use most recent run. If they are specified, make sure they do not mark a point in the future. Because (usually) microseconds are stored in the database, too, we have to pad with extra 59 seconds (note: 59 because it will be the same minute but there will only be a single "dead" second, we can live with that). ''' time_error_message = '' time_obj = datetime.datetime.fromtimestamp(int(time.time())) try: timestamp = kwargs['date'] if 'date' in kwargs is not None else time_obj.strftime('%Y-%m-%d') timestamp += '_' + (kwargs['time'] if 'time' in kwargs else time_obj.strftime('%H:%M')) # notice the extra seconds to avoid microsecond and minute issues time_obj = datetime.datetime.fromtimestamp(time.mktime(time.strptime(timestamp, "%Y-%m-%d_%H:%M"))+59) except Exception: time_error_message = "The passed time was invalid" if time_obj > datetime.datetime.fromtimestamp(int(time.time())+59): time_error_message = "HappyFace is not an oracle" time_obj = datetime.datetime.fromtimestamp(int(time.time())+59) try: test = hf_runs.select().execute().fetchone() self.logger.error("Test "+str(test)) except Exception, e: self.logger.error(traceback.format_exc()) run = hf_runs.select(hf_runs.c.time <= time_obj).\ where(or_(hf_runs.c.completed==True, hf_runs.c.completed==None)).\ order_by(hf_runs.c.time.desc()).\ execute().fetchone() if run is None: time_error_message = "No data so far in past" run = hf_runs.select(hf_runs.c.time >= time_obj).\ where(or_(hf_runs.c.completed==True, hf_runs.c.completed==None)).\ order_by(hf_runs.c.time.asc()).\ execute().fetchone() time_obj = run["time"] run = {"id":run["id"], "time":run["time"]} # if the run is older than a certain time threshold, # then mark it as stale stale_threshold = datetime.timedelta(0, 0, 0, 0,\ int(hf.config.get('happyface', 'stale_data_threshold_minutes'))) data_stale = (run['time'] + stale_threshold) < datetime.datetime.now() run['stale'] = data_stale category_list = [cat.getCategory(run) for cat in self.category_list] category_dict = dict((cat.name, cat) for cat in category_list) selected_category = None for c in category_list: if c.name == category: selected_category = c break lock_icon = 'lock_icon_on.png' if cp.request.cert_authorized else 'lock_icon_off.png' lock_icon = os.path.join(hf.config.get('paths', 'template_icons_url'), lock_icon) template_context = { "static_url": hf.config.get('paths', 'static_url'), "happyface_url": hf.config.get('paths', 'happyface_url'), "category_list": category_list, "module_list": [], "hf": hf, "time_specified": ('date' in kwargs or 'time' in kwargs), "date_string": time_obj.strftime('%Y-%m-%d'), "time_string": time_obj.strftime('%H:%M'), "histo_step": kwargs['s'] if 's' in kwargs else "00:15", "run": run, 'selected_module': None, 'selected_category': selected_category, 'time_error_message': time_error_message, 'data_stale': data_stale, 'svn_rev': hf.__version__, 'lock_icon': lock_icon, 'include_time_in_url': ('date' in kwargs or 'time' in kwargs), 'automatic_reload': not ('date' in kwargs or 'time' in kwargs), 'reload_interval': int(hf.config.get('happyface', 'reload_interval')), } for cat in category_list: template_context["module_list"].extend(cat.module_list) return template_context, category_dict, run @cp.expose @cp.tools.category_caching() def default(self, category=None, **kwargs): try: # Don't HTTP cache, when no explicit time is set if "date" in kwargs or "time" in kwargs: cp.lib.caching.expires(secs=3600, force=True) else: cp.lib.caching.expires(secs=1, force=True) template_context, category_dict, run = self.prepareDisplay(category, **kwargs) doc = u"" if 'action' in kwargs: if kwargs['action'].lower() == 'getxml': template_context['category_list'] = filter(lambda x: not x.isUnauthorized(), template_context['category_list']) doc = hf.category.renderXmlOverview(run, template_context) else: doc = u'''<h2>Unkown action</h2> <p>The specified action <em>%s</em> is not known.<p>''' % kwargs['action'] else: ''' Show the requested category or a 'blank' overview if no category is specified. ''' if category is None: filename = os.path.join(hf.hf_dir, hf.config.get("paths", "hf_template_dir"), "index.html") index_template = Template(filename=filename, lookup=hf.template_lookup) doc = index_template.render_unicode(**template_context) elif category is not None and not category in category_dict: raise cp.HTTPError(404) elif category is not None: if run is not None: doc = category_dict[category].render(template_context) else: doc = u"<h2>No data found at this time</h2>" return doc except cp.CherryPyException: raise except Exception, e: self.logger.error("Page request threw exception: %s" % str(e)) self.logger.error(traceback.format_exc()) raise class AjaxDispatcher: def __init__(self, category_list): self.logger = logging.getLogger(self.__module__) self.category_list = category_list self.modules = {} for category in self.category_list: for module in category.module_list: self.modules[module.instance_name] = module self.logger.debug(self.modules) @cp.expose @cp.tools.caching() def default(self, module, run_id, **kwargs): response = {"status": "unkown", "data": []} try: if module not in self.modules: raise Exception("Module not found") module = self.modules[module] if module.isUnauthorized(): raise cp.HTTPError(status=403, message="You are not allowed to access this resource.") run = hf_runs.select(hf_runs.c.id==run_id).execute().fetchone() if run is None: raise cp.HTTPError(status=404, message="The specified run ID was not found!") specific_module = module.getModule(run) if not hasattr(specific_module, "ajax"): raise cp.HTTPError(status=404, message="Module does not export data via Ajax") if specific_module.error_string: raise Exception(specific_module.error_string) if specific_module.dataset is None: raise cp.HTTPError(status=404, message="No data at this time") self.logger.debug(specific_module.error_string, specific_module.dataset) response["data"] = specific_module.ajax(**kwargs) response["status"] = "success" cp.lib.caching.expires(secs=9999999, force=True) # ajax data never goes bad, since it is supposed to be static except cp.HTTPError, e: cp.lib.caching.expires(secs=0, force=False) response = { "status": "error", "code": e.code, "reason": "%i: %s" % (e.code, e.reason), "data": [] } except Exception, e: cp.lib.caching.expires(secs=30, force=True) # ajax data never goes bad, since it is supposed to be static self.logger.error("Ajax request threw exception: %s" % str(e)) self.logger.error(traceback.format_exc()) response = { "status": "error", "code": 500, "reason": str(e), "data":[] } finally: return json.dumps(response)
# -*- coding: utf-8 -*- # # Copyright 2012 Institut für Experimentelle Kernphysik - Karlsruher Institut für Technologie # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cherrypy as cp import hf, datetime, time, logging, traceback, os from hf.module.database import hf_runs import hf.plotgenerator from sqlalchemy import * from mako.template import Template import json class CategoryCachingTool(cp._cptools.CachingTool): """ Extends the default caching tool to distinguish between category page requests with and without explicit time parameter set. If no explicit time is given (-> most recent run shall be displayed) it is checked if the cached page is older than the most recent run in the database. If that is the case, all variants of the current URI is removed from cache (it is not possible to remove *this* variant as far as I know). """ def _wrapper(self, **kwargs): params = cp.serving.request.params # dirty check if we want "the most current" page # When HappyFace is first started, there is no cp._cache variable. # It is created by the first call to cp.lib.caching.get() ! if hasattr(cp, "_cache") and ("time" not in params or "date" not in params): cached_data = cp._cache.get() if cached_data is None: super(CategoryCachingTool, self)._wrapper(**kwargs) return cached_run_date = datetime.datetime.fromtimestamp(cached_data[3]) hf_runs = hf.module.database.hf_runs newest_run_date = select([hf_runs.c.time], hf_runs.c.completed==True).order_by(hf_runs.c.time.desc()).execute().fetchone()[0] if cached_run_date < newest_run_date: cp._cache.delete() super(CategoryCachingTool, self)._wrapper(**kwargs) _wrapper.priority = 20 cp.tools.category_caching = CategoryCachingTool('before_handler', cp.lib.caching.get, 'category_caching') class Dispatcher(object): """ Show a page for displaying the contents of a category. """ def __init__(self, category_list): self.logger = logging.getLogger(self.__module__) self.category_list = category_list def prepareDisplay(self, category=None, **kwargs): """ generate the data and template context required to display a page containing the navbar. :param string: Name of the ca :returns: tuple (template_context, category_dict, run) """ ''' Select a hf run based on the passed 'date' and 'time' parameters. If not specified, use most recent run. If they are specified, make sure they do not mark a point in the future. Because (usually) microseconds are stored in the database, too, we have to pad with extra 59 seconds (note: 59 because it will be the same minute but there will only be a single "dead" second, we can live with that). ''' time_error_message = '' time_obj = datetime.datetime.fromtimestamp(int(time.time())) try: timestamp = kwargs['date'] if 'date' in kwargs is not None else time_obj.strftime('%Y-%m-%d') timestamp += '_' + (kwargs['time'] if 'time' in kwargs else time_obj.strftime('%H:%M')) # notice the extra seconds to avoid microsecond and minute issues time_obj = datetime.datetime.fromtimestamp(time.mktime(time.strptime(timestamp, "%Y-%m-%d_%H:%M"))+59) except Exception: time_error_message = "The passed time was invalid" if time_obj > datetime.datetime.fromtimestamp(int(time.time())+59): time_error_message = "HappyFace is not an oracle" time_obj = datetime.datetime.fromtimestamp(int(time.time())+59) try: test = hf_runs.select().execute().fetchone() self.logger.error("Test "+str(test)) except Exception, e: self.logger.error(traceback.format_exc()) run = hf_runs.select(hf_runs.c.time <= time_obj).\ where(or_(hf_runs.c.completed==True, hf_runs.c.completed==None)).\ order_by(hf_runs.c.time.desc()).\ execute().fetchone() if run is None: time_error_message = "No data so far in past" run = hf_runs.select(hf_runs.c.time >= time_obj).\ where(or_(hf_runs.c.completed==True, hf_runs.c.completed==None)).\ order_by(hf_runs.c.time.asc()).\ execute().fetchone() time_obj = run["time"] run = {"id":run["id"], "time":run["time"]} # if the run is older than a certain time threshold, # then mark it as stale stale_threshold = datetime.timedelta(0, 0, 0, 0,\ int(hf.config.get('happyface', 'stale_data_threshold_minutes'))) data_stale = (run['time'] + stale_threshold) < datetime.datetime.now() run['stale'] = data_stale category_list = [cat.getCategory(run) for cat in self.category_list] category_dict = dict((cat.name, cat) for cat in category_list) selected_category = None for c in category_list: if c.name == category: selected_category = c break lock_icon = 'lock_icon_on.png' if cp.request.cert_authorized else 'lock_icon_off.png' lock_icon = os.path.join(hf.config.get('paths', 'template_icons_url'), lock_icon) template_context = { "static_url": hf.config.get('paths', 'static_url'), "happyface_url": hf.config.get('paths', 'happyface_url'), "category_list": category_list, "module_list": [], "hf": hf, "time_specified": ('date' in kwargs or 'time' in kwargs), "date_string": time_obj.strftime('%Y-%m-%d'), "time_string": time_obj.strftime('%H:%M'), "histo_step": kwargs['s'] if 's' in kwargs else "00:15", "run": run, 'selected_module': None, 'selected_category': selected_category, 'time_error_message': time_error_message, 'data_stale': data_stale, 'svn_rev': hf.__version__, 'lock_icon': lock_icon, 'include_time_in_url': ('date' in kwargs or 'time' in kwargs), 'automatic_reload': not ('date' in kwargs or 'time' in kwargs), 'reload_interval': int(hf.config.get('happyface', 'reload_interval')), } for cat in category_list: template_context["module_list"].extend(cat.module_list) return template_context, category_dict, run @cp.expose @cp.tools.category_caching() def default(self, category=None, **kwargs): try: # Don't HTTP cache, when no explicit time is set if "date" in kwargs or "time" in kwargs: cp.lib.caching.expires(secs=3600, force=True) else: cp.lib.caching.expires(secs=1, force=True) template_context, category_dict, run = self.prepareDisplay(category, **kwargs) doc = u"" if 'action' in kwargs: if kwargs['action'].lower() == 'getxml': template_context['category_list'] = filter(lambda x: not x.isUnauthorized(), template_context['category_list']) doc = hf.category.renderXmlOverview(run, template_context) else: doc = u'''<h2>Unkown action</h2> <p>The specified action <em>%s</em> is not known.<p>''' % kwargs['action'] else: ''' Show the requested category or a 'blank' overview if no category is specified. ''' if category is None: filename = os.path.join(hf.hf_dir, hf.config.get("paths", "hf_template_dir"), "index.html") index_template = Template(filename=filename, lookup=hf.template_lookup) doc = index_template.render_unicode(**template_context) elif category is not None and not category in category_dict: raise cp.HTTPError(404) elif category is not None: if run is not None: doc = category_dict[category].render(template_context) else: doc = u"<h2>No data found at this time</h2>" return doc except cp.CherryPyException: raise except Exception, e: self.logger.error("Page request threw exception: %s" % str(e)) self.logger.error(traceback.format_exc()) raise class AjaxDispatcher: def __init__(self, category_list): self.logger = logging.getLogger(self.__module__) self.category_list = category_list self.modules = {} for category in self.category_list: for module in category.module_list: self.modules[module.instance_name] = module self.logger.debug(self.modules) @cp.expose @cp.tools.caching() def default(self, module, run_id, **kwargs): response = {"status": "unkown", "data": []} try: if module not in self.modules: raise Exception("Module not found") module = self.modules[module] if module.isUnauthorized(): raise cp.HTTPError(status=403, message="You are not allowed to access this resource.") run = hf_runs.select(hf_runs.c.id==run_id).execute().fetchone() if run is None: raise cp.HTTPError(status=404, message="The specified run ID was not found!") specific_module = module.getModule(run) if not hasattr(specific_module, "ajax"): raise cp.HTTPError(status=404, message="Module does not export data via Ajax") if specific_module.error_string: raise Exception(specific_module.error_string) if specific_module.dataset is None: raise cp.HTTPError(status=404, message="No data at this time") self.logger.debug(specific_module.error_string, specific_module.dataset) response["data"] = specific_module.ajax(**kwargs) response["status"] = "success" cp.lib.caching.expires(secs=9999999, force=True) # ajax data never goes bad, since it is supposed to be static except cp.HTTPError, e: cp.lib.caching.expires(secs=0, force=False) response = { "status": "error", "code": e.code, "reason": "%i: %s" % (e.code, e.reason), "data": [] } except Exception, e: cp.lib.caching.expires(secs=30, force=True) # ajax data never goes bad, since it is supposed to be static self.logger.error("Ajax request threw exception: %s" % str(e)) self.logger.error(traceback.format_exc()) response = { "status": "error", "code": 500, "reason": str(e), "data":[] } finally: return json.dumps(response)
en
0.822119
# -*- coding: utf-8 -*- # # Copyright 2012 Institut für Experimentelle Kernphysik - Karlsruher Institut für Technologie # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Extends the default caching tool to distinguish between category page requests with and without explicit time parameter set. If no explicit time is given (-> most recent run shall be displayed) it is checked if the cached page is older than the most recent run in the database. If that is the case, all variants of the current URI is removed from cache (it is not possible to remove *this* variant as far as I know). # dirty check if we want "the most current" page # When HappyFace is first started, there is no cp._cache variable. # It is created by the first call to cp.lib.caching.get() ! Show a page for displaying the contents of a category. generate the data and template context required to display a page containing the navbar. :param string: Name of the ca :returns: tuple (template_context, category_dict, run) Select a hf run based on the passed 'date' and 'time' parameters. If not specified, use most recent run. If they are specified, make sure they do not mark a point in the future. Because (usually) microseconds are stored in the database, too, we have to pad with extra 59 seconds (note: 59 because it will be the same minute but there will only be a single "dead" second, we can live with that). # notice the extra seconds to avoid microsecond and minute issues # if the run is older than a certain time threshold, # then mark it as stale # Don't HTTP cache, when no explicit time is set <h2>Unkown action</h2> <p>The specified action <em>%s</em> is not known.<p> Show the requested category or a 'blank' overview if no category is specified. # ajax data never goes bad, since it is supposed to be static # ajax data never goes bad, since it is supposed to be static
1.884325
2
tests/utils/test_npp.py
marblejenka/sawatabi
12
6617645
# Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest from sawatabi.utils.npp import solve_npp_with_dp @pytest.mark.parametrize( "numbers,ans,golden", [ ([1, 2, 4, 5], True, [[0, 3], [1, 2]]), # sum: even, exactly partitioned: True ([1, 2, 4, 9], False, [[0, 1, 2]]), # sum: even, exactly partitioned: False ([1, 2, 4, 6], False, [[1, 2], [3]]), # sum: odd, exactly partitioned: False ], ) def test_solve_npp_with_dp(numbers, ans, golden): solution = solve_npp_with_dp(numbers, enumerate_all=True) assert solution[0] == ans if solution[0]: for g in golden: assert g in solution[1] assert g in solution[2] else: for g in golden: assert g in solution[1] @pytest.mark.parametrize("seed", [1, 2, 3, 4, 5]) def test_solve_npp_with_dp_random(seed): np.random.seed(seed) numbers = list(np.random.randint(low=1, high=99, size=100)) solution = solve_npp_with_dp(numbers) s1 = sum(numbers[i] for i in solution[1][0]) s2 = sum(numbers[i] for i in solution[2][0]) if solution[0]: assert s1 == s2 else: assert s1 + 1 == s2 def test_solve_npp_with_dp_with_dp_table(capfd): solution = solve_npp_with_dp(numbers=[1, 1, 2, 3, 5, 8, 13, 21], enumerate_all=True, print_dp_table=True) golden = [ [0, 4, 7], [1, 4, 7], [0, 2, 3, 7], [1, 2, 3, 7], [0, 4, 5, 6], [1, 4, 5, 6], [0, 2, 3, 5, 6], [1, 2, 3, 5, 6], ] assert solution[0] for g in golden: assert g in solution[1] assert g in solution[2] out, err = capfd.readouterr() assert "dp:" in out assert "(True, [[0, 4, 7], [1, 4, 7], [0, 2, 3, 7], [1, 2, 3, 7], [0, 4, 5, 6], [1, 4, 5, 6], [0, 2, 3, 5, 6], [1, 2, 3, 5, 6]])" in out
# Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest from sawatabi.utils.npp import solve_npp_with_dp @pytest.mark.parametrize( "numbers,ans,golden", [ ([1, 2, 4, 5], True, [[0, 3], [1, 2]]), # sum: even, exactly partitioned: True ([1, 2, 4, 9], False, [[0, 1, 2]]), # sum: even, exactly partitioned: False ([1, 2, 4, 6], False, [[1, 2], [3]]), # sum: odd, exactly partitioned: False ], ) def test_solve_npp_with_dp(numbers, ans, golden): solution = solve_npp_with_dp(numbers, enumerate_all=True) assert solution[0] == ans if solution[0]: for g in golden: assert g in solution[1] assert g in solution[2] else: for g in golden: assert g in solution[1] @pytest.mark.parametrize("seed", [1, 2, 3, 4, 5]) def test_solve_npp_with_dp_random(seed): np.random.seed(seed) numbers = list(np.random.randint(low=1, high=99, size=100)) solution = solve_npp_with_dp(numbers) s1 = sum(numbers[i] for i in solution[1][0]) s2 = sum(numbers[i] for i in solution[2][0]) if solution[0]: assert s1 == s2 else: assert s1 + 1 == s2 def test_solve_npp_with_dp_with_dp_table(capfd): solution = solve_npp_with_dp(numbers=[1, 1, 2, 3, 5, 8, 13, 21], enumerate_all=True, print_dp_table=True) golden = [ [0, 4, 7], [1, 4, 7], [0, 2, 3, 7], [1, 2, 3, 7], [0, 4, 5, 6], [1, 4, 5, 6], [0, 2, 3, 5, 6], [1, 2, 3, 5, 6], ] assert solution[0] for g in golden: assert g in solution[1] assert g in solution[2] out, err = capfd.readouterr() assert "dp:" in out assert "(True, [[0, 4, 7], [1, 4, 7], [0, 2, 3, 7], [1, 2, 3, 7], [0, 4, 5, 6], [1, 4, 5, 6], [0, 2, 3, 5, 6], [1, 2, 3, 5, 6]])" in out
en
0.83611
# Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # sum: even, exactly partitioned: True # sum: even, exactly partitioned: False # sum: odd, exactly partitioned: False
2.405996
2
improviser-client/improviser_client/models/__init__.py
acidjunk/improviser-api-client
0
6617646
<filename>improviser-client/improviser_client/models/__init__.py<gh_stars>0 """ Contains all the data models used in inputs/outputs """ from .exercise import Exercise from .exercise_to_tag import ExerciseToTag from .recent_exercise import RecentExercise from .rendered_riff import RenderedRiff from .riff import Riff from .riff_to_tag import RiffToTag from .tag import Tag from .user_preference import UserPreference
<filename>improviser-client/improviser_client/models/__init__.py<gh_stars>0 """ Contains all the data models used in inputs/outputs """ from .exercise import Exercise from .exercise_to_tag import ExerciseToTag from .recent_exercise import RecentExercise from .rendered_riff import RenderedRiff from .riff import Riff from .riff_to_tag import RiffToTag from .tag import Tag from .user_preference import UserPreference
en
0.658013
Contains all the data models used in inputs/outputs
1.117377
1
1024.py
OmangRawat/Leetcode
0
6617647
""" ---> Video Stitching ---> Medium """ import cmath class Solution: def videoStitching(self, clips, time: int) -> int: dp = [cmath.inf] * (time + 1) dp[0] = 0 for i in range(1, time + 1): for start, end in clips: if start <= i <= end: dp[i] = min(dp[start] + 1, dp[i]) if dp[time] == cmath.inf: return -1 return dp[time] def videoStitching_sol2(self, clips, time: int) -> int: in_clips = [[0, 2], [4, 6], [8, 10], [1, 9], [1, 5], [5, 9]] in_time = 10 a = Solution() print(a.videoStitching(in_clips, in_time)) """ """
""" ---> Video Stitching ---> Medium """ import cmath class Solution: def videoStitching(self, clips, time: int) -> int: dp = [cmath.inf] * (time + 1) dp[0] = 0 for i in range(1, time + 1): for start, end in clips: if start <= i <= end: dp[i] = min(dp[start] + 1, dp[i]) if dp[time] == cmath.inf: return -1 return dp[time] def videoStitching_sol2(self, clips, time: int) -> int: in_clips = [[0, 2], [4, 6], [8, 10], [1, 9], [1, 5], [5, 9]] in_time = 10 a = Solution() print(a.videoStitching(in_clips, in_time)) """ """
ca
0.21376
---> Video Stitching ---> Medium
3.157528
3
scraper.py
mehranhussain/scraper
0
6617648
from urllib.request import urlopen from bs4 import BeautifulSoup html = urlopen('https://baroque.pk/collections/lawn') bs = BeautifulSoup(html.read(), 'html.parser') nameList = bs.find('script', {'type':'text/template'}) print(nameList)
from urllib.request import urlopen from bs4 import BeautifulSoup html = urlopen('https://baroque.pk/collections/lawn') bs = BeautifulSoup(html.read(), 'html.parser') nameList = bs.find('script', {'type':'text/template'}) print(nameList)
none
1
2.887634
3
libs/workload.py
osswangxining/istio-bench
25
6617649
<reponame>osswangxining/istio-bench<filename>libs/workload.py # Copyright 2020 Istio-Bench Authors and Hitachi Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from logging import getLogger from libs import command class Workload: """ Deploy/Delete Kubernetes resources(namespace, pod, service) as workload """ def __init__(self, delete_label, prefix, test_id): self.logger = getLogger(__name__) self.label = delete_label self.prefix = prefix self.test_id = test_id self.name_index = 0 # Do not use this variable outside of __get_unique_postfix. self.ns_template = None self.svc_template = None self.dep_template = None self.aggregated_mode = True self.aggregated_num = 10 def load_template(self, kind: str, filepath: str): manifest = "" try: with open(filepath) as f: manifest = f.read() except Exception as e: self.logger.error("Failed to load manifest in {}".format(filepath)) raise e if kind.lower() == "namespace": self.ns_template = manifest elif kind.lower() == "deployment": self.dep_template = manifest elif kind.lower() == "service": self.svc_template = manifest else: raise TypeError( "{} is unsupported. Please use service, deployment, or namespace".format(kind) ) def generate(self, number_of_services: int = 50) -> str: """ Create a namespace, a deployment, and N services Returns ------- namespace : str Name of Namespace generated as a workload """ ns_name = self.__get_unique_name() self.logger.info("Create namespace: {ns}".format(ns=ns_name)) ns_manifest = self.ns_template.format(name=ns_name) self.__deploy_manifest( manifest=ns_manifest, namespace=None, resource_type="namespace", resource_name=ns_name) dep_name = self.__get_unique_name() self.logger.info("Create deployment: {dep}".format(dep=dep_name)) dep_manifest = self.dep_template.format(name=dep_name) self.__deploy_manifest( manifest=dep_manifest, namespace=ns_name, resource_type="deployment", resource_name=dep_name) self.logger.info("Create services") # [m1, m2, m3, m4, ... ,mn] svcs = [ self.svc_template.format( name=self.__get_unique_name(), deployment_name=dep_name ) for _ in range(number_of_services)] if self.aggregated_mode: agg = self.aggregated_num # [[m1, m2, m3],[m4,m5,m6], ... [m(n-1), mn]] svcs = [svcs[i:i + agg] for i in range(0, len(svcs), agg)] svcs = ["\n---\n".join(i) for i in svcs] # yaml aggregate sys.stdout.write(" Progress: 0%") total = len(svcs) for i, s in enumerate(svcs): self.__deploy_manifest( manifest=s, namespace=ns_name, resource_type="service", resource_name="services") sys.stdout.write("\r Progress: {}%".format(int((i + 1) * 100 / total))) sys.stdout.flush() sys.stdout.write("\n") sys.stdout.flush() return ns_name def __deploy_manifest(self, manifest, namespace=None, resource_type=None, resource_name=None): ns = " --namespace {}".format(namespace) if namespace else "" cmd = "cat << EOF | kubectl {ns} apply -f -\n{manifest}EOF".format( ns=ns, manifest=manifest ) try: resp = command.run_sync(cmd) except Exception as e: self.logger.error("Failed to create {} {}".format( resource_type, resource_name) ) raise e self.logger.info("Deploy {} - {}".format(resource_type, resource_name)) self.logger.debug("Deploy result: {}".format(resp)) def wait(self, namespace, resource, condition, timeout="300s"): """ Wait for a specific condition on deployed/deleted resources """ # This function use timeout command. because --timeout option in # <kubectl wait> does not mean process timeout. # --timeout is interval of sending GET resource to api-server cmd = "timeout {timeout} kubectl --namespace {namespace} wait {resource} \ --for=condition={condition} --selector={label}".format( namespace=namespace, resource=resource, condition=condition, label=self.label, timeout=timeout, ) command.run_sync(cmd) def reset(self): """ Delete all namespaces generated by this script """ self.logger.info("Delete all namespaces that has {} label".format(self.label)) cmd = "kubectl delete namespace --selector={}".format(self.label) resp = command.run_sync(cmd) if resp: self.logger.debug("Result: {}".format(resp)) def __get_unique_name(self) -> str: """ Get postfix of Service, Deployment, Namespace """ idx = self.name_index self.name_index += 1 return "{}-{:04}-{}".format(self.prefix, idx, self.test_id)
# Copyright 2020 Istio-Bench Authors and Hitachi Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from logging import getLogger from libs import command class Workload: """ Deploy/Delete Kubernetes resources(namespace, pod, service) as workload """ def __init__(self, delete_label, prefix, test_id): self.logger = getLogger(__name__) self.label = delete_label self.prefix = prefix self.test_id = test_id self.name_index = 0 # Do not use this variable outside of __get_unique_postfix. self.ns_template = None self.svc_template = None self.dep_template = None self.aggregated_mode = True self.aggregated_num = 10 def load_template(self, kind: str, filepath: str): manifest = "" try: with open(filepath) as f: manifest = f.read() except Exception as e: self.logger.error("Failed to load manifest in {}".format(filepath)) raise e if kind.lower() == "namespace": self.ns_template = manifest elif kind.lower() == "deployment": self.dep_template = manifest elif kind.lower() == "service": self.svc_template = manifest else: raise TypeError( "{} is unsupported. Please use service, deployment, or namespace".format(kind) ) def generate(self, number_of_services: int = 50) -> str: """ Create a namespace, a deployment, and N services Returns ------- namespace : str Name of Namespace generated as a workload """ ns_name = self.__get_unique_name() self.logger.info("Create namespace: {ns}".format(ns=ns_name)) ns_manifest = self.ns_template.format(name=ns_name) self.__deploy_manifest( manifest=ns_manifest, namespace=None, resource_type="namespace", resource_name=ns_name) dep_name = self.__get_unique_name() self.logger.info("Create deployment: {dep}".format(dep=dep_name)) dep_manifest = self.dep_template.format(name=dep_name) self.__deploy_manifest( manifest=dep_manifest, namespace=ns_name, resource_type="deployment", resource_name=dep_name) self.logger.info("Create services") # [m1, m2, m3, m4, ... ,mn] svcs = [ self.svc_template.format( name=self.__get_unique_name(), deployment_name=dep_name ) for _ in range(number_of_services)] if self.aggregated_mode: agg = self.aggregated_num # [[m1, m2, m3],[m4,m5,m6], ... [m(n-1), mn]] svcs = [svcs[i:i + agg] for i in range(0, len(svcs), agg)] svcs = ["\n---\n".join(i) for i in svcs] # yaml aggregate sys.stdout.write(" Progress: 0%") total = len(svcs) for i, s in enumerate(svcs): self.__deploy_manifest( manifest=s, namespace=ns_name, resource_type="service", resource_name="services") sys.stdout.write("\r Progress: {}%".format(int((i + 1) * 100 / total))) sys.stdout.flush() sys.stdout.write("\n") sys.stdout.flush() return ns_name def __deploy_manifest(self, manifest, namespace=None, resource_type=None, resource_name=None): ns = " --namespace {}".format(namespace) if namespace else "" cmd = "cat << EOF | kubectl {ns} apply -f -\n{manifest}EOF".format( ns=ns, manifest=manifest ) try: resp = command.run_sync(cmd) except Exception as e: self.logger.error("Failed to create {} {}".format( resource_type, resource_name) ) raise e self.logger.info("Deploy {} - {}".format(resource_type, resource_name)) self.logger.debug("Deploy result: {}".format(resp)) def wait(self, namespace, resource, condition, timeout="300s"): """ Wait for a specific condition on deployed/deleted resources """ # This function use timeout command. because --timeout option in # <kubectl wait> does not mean process timeout. # --timeout is interval of sending GET resource to api-server cmd = "timeout {timeout} kubectl --namespace {namespace} wait {resource} \ --for=condition={condition} --selector={label}".format( namespace=namespace, resource=resource, condition=condition, label=self.label, timeout=timeout, ) command.run_sync(cmd) def reset(self): """ Delete all namespaces generated by this script """ self.logger.info("Delete all namespaces that has {} label".format(self.label)) cmd = "kubectl delete namespace --selector={}".format(self.label) resp = command.run_sync(cmd) if resp: self.logger.debug("Result: {}".format(resp)) def __get_unique_name(self) -> str: """ Get postfix of Service, Deployment, Namespace """ idx = self.name_index self.name_index += 1 return "{}-{:04}-{}".format(self.prefix, idx, self.test_id)
en
0.785659
# Copyright 2020 Istio-Bench Authors and Hitachi Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Deploy/Delete Kubernetes resources(namespace, pod, service) as workload # Do not use this variable outside of __get_unique_postfix. Create a namespace, a deployment, and N services Returns ------- namespace : str Name of Namespace generated as a workload # [m1, m2, m3, m4, ... ,mn] # [[m1, m2, m3],[m4,m5,m6], ... [m(n-1), mn]] # yaml aggregate Wait for a specific condition on deployed/deleted resources # This function use timeout command. because --timeout option in # <kubectl wait> does not mean process timeout. # --timeout is interval of sending GET resource to api-server Delete all namespaces generated by this script Get postfix of Service, Deployment, Namespace
1.786082
2
LEDs/ledsOn_1.py
ibrito/RaspberryPi
0
6617650
<reponame>ibrito/RaspberryPi<gh_stars>0 #!/usr/bin/python # -*- coding: utf-8 -*- #+++++++++++++++++++++++++++++ #title :ledsOn_1.py #description :Script en python para encender un LED #author :<NAME> #date :21-12-2017 #Placa :Raspberry Pi B+ V1.2 #lenguaje :Python #++++++++++++++++++++++++++++++ import RPi.GPIO as L import time L.setmode(L.BCM) # inicializar la placa en modo BCM L.setwarnings(False) # se apagan las advertencias listaGPIO = [16] # aca se cargaran la GPIO que se quieren controlar #--- Definicion de funciones def iniGPIO(lista): # funcion para inicializar las GPIO como salidad listadas en listaGPIO for pin in lista: L.setup(pin,L.OUT) def offTodo(lista): # funcion para apagar o poner en 0 todas las GPIO de la listaGPIO for pin in lista: L.output(pin,L.LOW) def onTodo(lista): # funcion para encender o poner en 1 todas las GPIO de la listaGPIO for pin in lista: L.output(pin,L.HIGH) #--- Ejecucion iniGPIO(listaGPIO) # se configuran todas las salidas definidas en listaGPIO offTodo(listaGPIO) onTodo(listaGPIO) # se le envia la orden de poner en 1 todas las salidas de la listaGPIO time.sleep(2) # detiene la ejecución por 2 segundos L.cleanup() # Limpia los canales que se usaron el listaGPIO
#!/usr/bin/python # -*- coding: utf-8 -*- #+++++++++++++++++++++++++++++ #title :ledsOn_1.py #description :Script en python para encender un LED #author :<NAME> #date :21-12-2017 #Placa :Raspberry Pi B+ V1.2 #lenguaje :Python #++++++++++++++++++++++++++++++ import RPi.GPIO as L import time L.setmode(L.BCM) # inicializar la placa en modo BCM L.setwarnings(False) # se apagan las advertencias listaGPIO = [16] # aca se cargaran la GPIO que se quieren controlar #--- Definicion de funciones def iniGPIO(lista): # funcion para inicializar las GPIO como salidad listadas en listaGPIO for pin in lista: L.setup(pin,L.OUT) def offTodo(lista): # funcion para apagar o poner en 0 todas las GPIO de la listaGPIO for pin in lista: L.output(pin,L.LOW) def onTodo(lista): # funcion para encender o poner en 1 todas las GPIO de la listaGPIO for pin in lista: L.output(pin,L.HIGH) #--- Ejecucion iniGPIO(listaGPIO) # se configuran todas las salidas definidas en listaGPIO offTodo(listaGPIO) onTodo(listaGPIO) # se le envia la orden de poner en 1 todas las salidas de la listaGPIO time.sleep(2) # detiene la ejecución por 2 segundos L.cleanup() # Limpia los canales que se usaron el listaGPIO
es
0.808535
#!/usr/bin/python # -*- coding: utf-8 -*- #+++++++++++++++++++++++++++++ #title :ledsOn_1.py #description :Script en python para encender un LED #author :<NAME> #date :21-12-2017 #Placa :Raspberry Pi B+ V1.2 #lenguaje :Python #++++++++++++++++++++++++++++++ # inicializar la placa en modo BCM # se apagan las advertencias # aca se cargaran la GPIO que se quieren controlar #--- Definicion de funciones # funcion para inicializar las GPIO como salidad listadas en listaGPIO # funcion para apagar o poner en 0 todas las GPIO de la listaGPIO # funcion para encender o poner en 1 todas las GPIO de la listaGPIO #--- Ejecucion # se configuran todas las salidas definidas en listaGPIO # se le envia la orden de poner en 1 todas las salidas de la listaGPIO # detiene la ejecución por 2 segundos # Limpia los canales que se usaron el listaGPIO
3.273698
3