max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
2015/01/fc_2015_01_07.py
mfwarren/FreeCoding
0
6629751
<reponame>mfwarren/FreeCoding<gh_stars>0 #!/usr/bin/env python3 # imports go here import os import gspread import datetime import argparse from oauth2client.client import OAuth2WebServerFlow from oauth2client import tools from oauth2client.file import Storage import smtplib from email.mime.text import MIMEText # # Free Coding session for 2015-01-07 # Written by <NAME> # GMAIL_LOGIN = os.getenv("EMAIL_USERNAME") GMAIL_PASSWORD = os.getenv("EMAIL_PASSWORD") CLIENT_ID = os.getenv('GOOGLE_CLIENT_ID') CLIENT_SECRET = os.getenv('GOOGLE_CLIENT_SECRET') ten_days_ago = datetime.datetime.now() + datetime.timedelta(days=-10) def get_credentials(): storage = Storage('creds.data') credentials = storage.get() if credentials is None or credentials.invalid: flow = OAuth2WebServerFlow(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, scope='https://spreadsheets.google.com/feeds https://docs.google.com/feeds', redirect_uri='http://localhost') parser = argparse.ArgumentParser(parents=[tools.argparser]) flags = parser.parse_args() credentials = tools.run_flow(flow, storage, flags) return credentials def read_spreadsheet(name="Books"): gc = gspread.authorize(get_credentials()) spreadsheet = gc.open(name) books = spreadsheet.sheet1.get_all_values() # convert to list of dictionaries using first row as keys books = [{books[0][i]: book[i] for i in range(len(books[0]))} for book in books[1:]] return books def send_email(subject, message, from_addr=GMAIL_LOGIN, to_addr=GMAIL_LOGIN): msg = MIMEText(message) msg['Subject'] = subject msg['From'] = from_addr msg['To'] = to_addr server = smtplib.SMTP('smtp.gmail.com', 587) # port 465 or 587 server.ehlo() server.starttls() server.ehlo() server.login(GMAIL_LOGIN, GMAIL_PASSWORD) server.sendmail(from_addr, to_addr, msg.as_string()) server.close() def main(): recent_book = None for book in read_spreadsheet(): d = datetime.datetime.strptime(book['Finished'], '%d/%m/%Y') if d > ten_days_ago: recent_book = book break if recent_book is None: 
send_email("[Nag] Read A Book!", "You need to update the spreadsheet with another book.") if __name__ == "__main__": main()
#!/usr/bin/env python3 # imports go here import os import gspread import datetime import argparse from oauth2client.client import OAuth2WebServerFlow from oauth2client import tools from oauth2client.file import Storage import smtplib from email.mime.text import MIMEText # # Free Coding session for 2015-01-07 # Written by <NAME> # GMAIL_LOGIN = os.getenv("EMAIL_USERNAME") GMAIL_PASSWORD = os.getenv("EMAIL_PASSWORD") CLIENT_ID = os.getenv('GOOGLE_CLIENT_ID') CLIENT_SECRET = os.getenv('GOOGLE_CLIENT_SECRET') ten_days_ago = datetime.datetime.now() + datetime.timedelta(days=-10) def get_credentials(): storage = Storage('creds.data') credentials = storage.get() if credentials is None or credentials.invalid: flow = OAuth2WebServerFlow(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, scope='https://spreadsheets.google.com/feeds https://docs.google.com/feeds', redirect_uri='http://localhost') parser = argparse.ArgumentParser(parents=[tools.argparser]) flags = parser.parse_args() credentials = tools.run_flow(flow, storage, flags) return credentials def read_spreadsheet(name="Books"): gc = gspread.authorize(get_credentials()) spreadsheet = gc.open(name) books = spreadsheet.sheet1.get_all_values() # convert to list of dictionaries using first row as keys books = [{books[0][i]: book[i] for i in range(len(books[0]))} for book in books[1:]] return books def send_email(subject, message, from_addr=GMAIL_LOGIN, to_addr=GMAIL_LOGIN): msg = MIMEText(message) msg['Subject'] = subject msg['From'] = from_addr msg['To'] = to_addr server = smtplib.SMTP('smtp.gmail.com', 587) # port 465 or 587 server.ehlo() server.starttls() server.ehlo() server.login(GMAIL_LOGIN, GMAIL_PASSWORD) server.sendmail(from_addr, to_addr, msg.as_string()) server.close() def main(): recent_book = None for book in read_spreadsheet(): d = datetime.datetime.strptime(book['Finished'], '%d/%m/%Y') if d > ten_days_ago: recent_book = book break if recent_book is None: send_email("[Nag] Read A Book!", "You need to update 
the spreadsheet with another book.") if __name__ == "__main__": main()
en
0.689975
#!/usr/bin/env python3 # imports go here # # Free Coding session for 2015-01-07 # Written by <NAME> # # convert to list of dictionaries using first row as keys # port 465 or 587
2.925262
3
code/abc115_d_01.py
KoyanagiHitoshi/AtCoder
3
6629752
<gh_stars>1-10 N,X=map(int,input().split()) a,p=[1],[1] for i in range(N): a.append(a[i]*2+3) p.append(p[i]*2+1) def f(n,x): if n==0:return 0 if x<=0 else 1 elif x<=1+a[n-1]:return f(n-1,x-1) else:return p[n-1]+1+f(n-1,x-2-a[n-1]) print(f(N,X))
N,X=map(int,input().split()) a,p=[1],[1] for i in range(N): a.append(a[i]*2+3) p.append(p[i]*2+1) def f(n,x): if n==0:return 0 if x<=0 else 1 elif x<=1+a[n-1]:return f(n-1,x-1) else:return p[n-1]+1+f(n-1,x-2-a[n-1]) print(f(N,X))
none
1
3.078359
3
brownie/cli/run.py
acolytec3/brownie
0
6629753
<filename>brownie/cli/run.py #!/usr/bin/python3 from docopt import docopt from brownie import network, project, run from brownie.test.output import print_gas_profile from brownie._config import ARGV, CONFIG, update_argv_from_docopt __doc__ = f"""Usage: brownie run <filename> [<function>] [options] Arguments: <filename> The name of the script to run [<function>] The function to call (default is main) Options: --network [name] Use a specific network (default {CONFIG['network']['default']}) --gas -g Display gas profile for function calls --tb -t Show entire python traceback on exceptions --help -h Display this message Use run to execute scripts for contract deployment, to automate common interactions, or for gas profiling.""" def main(): args = docopt(__doc__) update_argv_from_docopt(args) if project.check_for_project(): active_project = project.load() active_project.load_config() print(f"{active_project._name} is the active project.") else: active_project = None print("No project was loaded.") network.connect(ARGV["network"]) run( args["<filename>"], method_name=args["<function>"] or "main", project=active_project, ) if ARGV["gas"]: print_gas_profile()
<filename>brownie/cli/run.py #!/usr/bin/python3 from docopt import docopt from brownie import network, project, run from brownie.test.output import print_gas_profile from brownie._config import ARGV, CONFIG, update_argv_from_docopt __doc__ = f"""Usage: brownie run <filename> [<function>] [options] Arguments: <filename> The name of the script to run [<function>] The function to call (default is main) Options: --network [name] Use a specific network (default {CONFIG['network']['default']}) --gas -g Display gas profile for function calls --tb -t Show entire python traceback on exceptions --help -h Display this message Use run to execute scripts for contract deployment, to automate common interactions, or for gas profiling.""" def main(): args = docopt(__doc__) update_argv_from_docopt(args) if project.check_for_project(): active_project = project.load() active_project.load_config() print(f"{active_project._name} is the active project.") else: active_project = None print("No project was loaded.") network.connect(ARGV["network"]) run( args["<filename>"], method_name=args["<function>"] or "main", project=active_project, ) if ARGV["gas"]: print_gas_profile()
en
0.490269
#!/usr/bin/python3 Usage: brownie run <filename> [<function>] [options] Arguments: <filename> The name of the script to run [<function>] The function to call (default is main) Options: --network [name] Use a specific network (default {CONFIG['network']['default']}) --gas -g Display gas profile for function calls --tb -t Show entire python traceback on exceptions --help -h Display this message Use run to execute scripts for contract deployment, to automate common interactions, or for gas profiling.
2.567316
3
youtube_dl/extractor/spankbang.py
pierrephilip31/download
1
6629754
<gh_stars>1-10 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError class SpankBangIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|m|[a-z]{2})\.)?spankbang\.com/(?P<id>[\da-z]+)/video' _TESTS = [{ 'url': 'http://spankbang.com/3vvn/video/fantasy+solo', 'md5': '1cc433e1d6aa14bc376535b8679302f7', 'info_dict': { 'id': '3vvn', 'ext': 'mp4', 'title': 'fantasy solo', 'description': 'Watch fantasy solo free HD porn video - 05 minutes - Babe,Masturbation,Solo,Toy - dillion harper masturbates on a bed free adult movies sexy clips.', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'silly2587', 'age_limit': 18, } }, { # 480p only 'url': 'http://spankbang.com/1vt0/video/solvane+gangbang', 'only_matching': True, }, { # no uploader 'url': 'http://spankbang.com/lklg/video/sex+with+anyone+wedding+edition+2', 'only_matching': True, }, { # mobile page 'url': 'http://m.spankbang.com/1o2de/video/can+t+remember+her+name', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) if re.search(r'<[^>]+\bid=["\']video_removed', webpage): raise ExtractorError( 'Video %s is not available' % video_id, expected=True) stream_key = self._html_search_regex( r'''var\s+stream_key\s*=\s*['"](.+?)['"]''', webpage, 'stream key') formats = [{ 'url': 'http://spankbang.com/_%s/%s/title/%sp__mp4' % (video_id, stream_key, height), 'ext': 'mp4', 'format_id': '%sp' % height, 'height': int(height), } for height in re.findall(r'<(?:span|li|p)[^>]+[qb]_(\d+)p', webpage)] self._check_formats(formats, video_id) self._sort_formats(formats) title = self._html_search_regex( r'(?s)<h1[^>]*>(.+?)</h1>', webpage, 'title') description = self._og_search_description(webpage) thumbnail = self._og_search_thumbnail(webpage) uploader = self._search_regex( r'class="user"[^>]*><img[^>]+>([^<]+)', webpage, 'uploader', default=None) age_limit = self._rta_search(webpage) return { 
'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'formats': formats, 'age_limit': age_limit, }
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError class SpankBangIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|m|[a-z]{2})\.)?spankbang\.com/(?P<id>[\da-z]+)/video' _TESTS = [{ 'url': 'http://spankbang.com/3vvn/video/fantasy+solo', 'md5': '1cc433e1d6aa14bc376535b8679302f7', 'info_dict': { 'id': '3vvn', 'ext': 'mp4', 'title': 'fantasy solo', 'description': 'Watch fantasy solo free HD porn video - 05 minutes - Babe,Masturbation,Solo,Toy - dillion harper masturbates on a bed free adult movies sexy clips.', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'silly2587', 'age_limit': 18, } }, { # 480p only 'url': 'http://spankbang.com/1vt0/video/solvane+gangbang', 'only_matching': True, }, { # no uploader 'url': 'http://spankbang.com/lklg/video/sex+with+anyone+wedding+edition+2', 'only_matching': True, }, { # mobile page 'url': 'http://m.spankbang.com/1o2de/video/can+t+remember+her+name', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) if re.search(r'<[^>]+\bid=["\']video_removed', webpage): raise ExtractorError( 'Video %s is not available' % video_id, expected=True) stream_key = self._html_search_regex( r'''var\s+stream_key\s*=\s*['"](.+?)['"]''', webpage, 'stream key') formats = [{ 'url': 'http://spankbang.com/_%s/%s/title/%sp__mp4' % (video_id, stream_key, height), 'ext': 'mp4', 'format_id': '%sp' % height, 'height': int(height), } for height in re.findall(r'<(?:span|li|p)[^>]+[qb]_(\d+)p', webpage)] self._check_formats(formats, video_id) self._sort_formats(formats) title = self._html_search_regex( r'(?s)<h1[^>]*>(.+?)</h1>', webpage, 'title') description = self._og_search_description(webpage) thumbnail = self._og_search_thumbnail(webpage) uploader = self._search_regex( r'class="user"[^>]*><img[^>]+>([^<]+)', webpage, 'uploader', default=None) age_limit = self._rta_search(webpage) return { 'id': video_id, 
'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'formats': formats, 'age_limit': age_limit, }
en
0.429547
# 480p only # no uploader # mobile page var\s+stream_key\s*=\s*['"](.+?)['"]
2.214331
2
chainerlp/extensions/restart_lr_scheduler.py
kumasento/gradient-scaling
7
6629755
<reponame>kumasento/gradient-scaling """ This extension implements the LR schedule from the SGDR paper. Reference: [1] https://gist.github.com/hrsma2i/9c6514e94cd5e802d9e216aef2bcfe59 """ import numpy as np from chainer.training import extension class RestartLRScheduler(extension.Extension): """ Implements the extension """ def __init__(self, lr_max, lr_min, T_0, T_mult, max_n_mult=10, optimizer=None): """ CTOR """ super(RestartLRScheduler, self).__init__() self.lr_max = lr_max self.lr_min = lr_min self.T_0 = T_0 self.T_mult = T_mult self.max_n_mult = max_n_mult self.optimizer = optimizer # state self._t = 0 self._last_lr = None self._last_t_i = None self._last_T_i = None # to calculate the current T def initialize(self, trainer): """ Initialize the content in the trainer """ optimizer = self._get_optimizer(trainer) if self._last_lr is not None: # resuming from snapshot self._update_value(optimizer, "lr", self._last_lr) self._update_value(optimizer, "t_i", self._last_t_i) self._update_value(optimizer, "T_i", self._last_T_i) else: self._update_value(optimizer, "lr", self.lr_max) self._update_value(optimizer, "t_i", 1) # starting from 1 self._update_value(optimizer, "T_i", self.T_0) def serialize(self, serializer): """ Resume """ self._t = serializer("_t", self._t) self._last_lr = serializer("_last_lr", self._last_lr) self._last_t_i = serializer("_last_t_i", self._last_t_i) self._last_T_i = serializer("_last_T_i", self._last_T_i) def __call__(self, trainer): """ Main update function. 
""" self._t += 1 # starting from 1 optimizer = self._get_optimizer(trainer) # period is a range between applying T_mult i = self._get_current_period() T_i = self.T_0 * (self.T_mult ** i) # apply geometric series formula if self.T_mult != 1: t_i = self._t - (T_i - self.T_0) // (self.T_mult - 1) + 1 else: t_i = self._t - T_i * i + 1 # collect learning rate lr = self._get_lr(T_i, t_i) self._update_value(optimizer, "lr", lr) self._update_value(optimizer, "t_i", t_i) self._update_value(optimizer, "T_i", T_i) def _get_lr(self, T_i, t_i): """ Get the learning rate at period p """ # current learning rate return self.lr_min + (self.lr_max - self.lr_min) * 0.5 * ( 1 + np.cos(np.pi * (t_i - 1) / T_i) ) def _get_current_period(self): """ Get the T_cur value """ periods = [self.T_0 * (self.T_mult ** i) for i in range(self.max_n_mult)] cumsum_periods = np.cumsum(periods) return np.where((self._t - cumsum_periods) < 0)[0][0] def _get_optimizer(self, trainer): """ Collect optimizer """ return self.optimizer or trainer.updater.get_optimizer("main") def _update_value(self, optimizer, attr, value): """ Update the attr in optimizer """ setattr(optimizer, attr, value) setattr(self, "_last_{}".format(attr), value) # naming convention
""" This extension implements the LR schedule from the SGDR paper. Reference: [1] https://gist.github.com/hrsma2i/9c6514e94cd5e802d9e216aef2bcfe59 """ import numpy as np from chainer.training import extension class RestartLRScheduler(extension.Extension): """ Implements the extension """ def __init__(self, lr_max, lr_min, T_0, T_mult, max_n_mult=10, optimizer=None): """ CTOR """ super(RestartLRScheduler, self).__init__() self.lr_max = lr_max self.lr_min = lr_min self.T_0 = T_0 self.T_mult = T_mult self.max_n_mult = max_n_mult self.optimizer = optimizer # state self._t = 0 self._last_lr = None self._last_t_i = None self._last_T_i = None # to calculate the current T def initialize(self, trainer): """ Initialize the content in the trainer """ optimizer = self._get_optimizer(trainer) if self._last_lr is not None: # resuming from snapshot self._update_value(optimizer, "lr", self._last_lr) self._update_value(optimizer, "t_i", self._last_t_i) self._update_value(optimizer, "T_i", self._last_T_i) else: self._update_value(optimizer, "lr", self.lr_max) self._update_value(optimizer, "t_i", 1) # starting from 1 self._update_value(optimizer, "T_i", self.T_0) def serialize(self, serializer): """ Resume """ self._t = serializer("_t", self._t) self._last_lr = serializer("_last_lr", self._last_lr) self._last_t_i = serializer("_last_t_i", self._last_t_i) self._last_T_i = serializer("_last_T_i", self._last_T_i) def __call__(self, trainer): """ Main update function. 
""" self._t += 1 # starting from 1 optimizer = self._get_optimizer(trainer) # period is a range between applying T_mult i = self._get_current_period() T_i = self.T_0 * (self.T_mult ** i) # apply geometric series formula if self.T_mult != 1: t_i = self._t - (T_i - self.T_0) // (self.T_mult - 1) + 1 else: t_i = self._t - T_i * i + 1 # collect learning rate lr = self._get_lr(T_i, t_i) self._update_value(optimizer, "lr", lr) self._update_value(optimizer, "t_i", t_i) self._update_value(optimizer, "T_i", T_i) def _get_lr(self, T_i, t_i): """ Get the learning rate at period p """ # current learning rate return self.lr_min + (self.lr_max - self.lr_min) * 0.5 * ( 1 + np.cos(np.pi * (t_i - 1) / T_i) ) def _get_current_period(self): """ Get the T_cur value """ periods = [self.T_0 * (self.T_mult ** i) for i in range(self.max_n_mult)] cumsum_periods = np.cumsum(periods) return np.where((self._t - cumsum_periods) < 0)[0][0] def _get_optimizer(self, trainer): """ Collect optimizer """ return self.optimizer or trainer.updater.get_optimizer("main") def _update_value(self, optimizer, attr, value): """ Update the attr in optimizer """ setattr(optimizer, attr, value) setattr(self, "_last_{}".format(attr), value) # naming convention
en
0.779081
This extension implements the LR schedule from the SGDR paper. Reference: [1] https://gist.github.com/hrsma2i/9c6514e94cd5e802d9e216aef2bcfe59 Implements the extension CTOR # state # to calculate the current T Initialize the content in the trainer # resuming from snapshot # starting from 1 Resume Main update function. # starting from 1 # period is a range between applying T_mult # apply geometric series formula # collect learning rate Get the learning rate at period p # current learning rate Get the T_cur value Collect optimizer Update the attr in optimizer # naming convention
2.582136
3
src/models/ensemble/__init__.py
universome/loss-patterns
84
6629756
from .plane_ensemble import PlaneEnsemble from .mapping_ensemble import MappingEnsemble from .normal_ensemble import NormalEnsemble __all__ = [ "PlaneEnsemble", "MappingEnsemble", "NormalEnsemble" ]
from .plane_ensemble import PlaneEnsemble from .mapping_ensemble import MappingEnsemble from .normal_ensemble import NormalEnsemble __all__ = [ "PlaneEnsemble", "MappingEnsemble", "NormalEnsemble" ]
none
1
0.969167
1
inline-callbacks/inline-callbacks-1.py
yukityan/twisted-intro
430
6629757
from twisted.internet.defer import inlineCallbacks, Deferred @inlineCallbacks def my_callbacks(): from twisted.internet import reactor print('first callback') result = yield 1 # yielded values that aren't deferred come right back print('second callback got', result) d = Deferred() reactor.callLater(5, d.callback, 2) result = yield d # yielded deferreds will pause the generator print('third callback got', result) # the result of the deferred d = Deferred() reactor.callLater(5, d.errback, Exception(3)) try: yield d except Exception as e: result = e print('fourth callback got', repr(result)) # the exception from the deferred reactor.stop() from twisted.internet import reactor reactor.callWhenRunning(my_callbacks) reactor.run()
from twisted.internet.defer import inlineCallbacks, Deferred @inlineCallbacks def my_callbacks(): from twisted.internet import reactor print('first callback') result = yield 1 # yielded values that aren't deferred come right back print('second callback got', result) d = Deferred() reactor.callLater(5, d.callback, 2) result = yield d # yielded deferreds will pause the generator print('third callback got', result) # the result of the deferred d = Deferred() reactor.callLater(5, d.errback, Exception(3)) try: yield d except Exception as e: result = e print('fourth callback got', repr(result)) # the exception from the deferred reactor.stop() from twisted.internet import reactor reactor.callWhenRunning(my_callbacks) reactor.run()
en
0.912887
# yielded values that aren't deferred come right back # yielded deferreds will pause the generator # the result of the deferred # the exception from the deferred
2.605013
3
mirumon/infra/infra_model.py
mirumon/mirumon-backend
19
6629758
<gh_stars>10-100 from typing import Any, TypeVar from pydantic import BaseModel from mirumon.domain.core.entity import Entity EntityT = TypeVar("EntityT", bound=Entity) class InfraModel(BaseModel): # type: ignore """Base class for models used in the infrastructure layer.""" id: Any # type: ignore @classmethod def from_entity(cls, entity: EntityT) -> BaseModel: return cls.parse_obj(entity.dict()) def to_entity(self) -> EntityT: raise NotImplementedError
from typing import Any, TypeVar from pydantic import BaseModel from mirumon.domain.core.entity import Entity EntityT = TypeVar("EntityT", bound=Entity) class InfraModel(BaseModel): # type: ignore """Base class for models used in the infrastructure layer.""" id: Any # type: ignore @classmethod def from_entity(cls, entity: EntityT) -> BaseModel: return cls.parse_obj(entity.dict()) def to_entity(self) -> EntityT: raise NotImplementedError
en
0.689808
# type: ignore Base class for models used in the infrastructure layer. # type: ignore
2.524287
3
slu_test.py
hursung1/coach
72
6629759
<gh_stars>10-100 from src.utils import init_experiment from src.slu.datareader import datareader, read_file, binarize_data from src.slu.dataloader import get_dataloader, Dataset, DataLoader, collate_fn from src.slu.baseline_loader import get_dataloader as get_baselineloader from src.slu.baseline_loader import collate_fn as baseline_collate_fn from src.slu.baseline_loader import Dataset as BaselineDataset from src.slu.trainer import SLUTrainer from src.slu.baseline_trainer import BaselineTrainer from config import get_params import torch import os def test_coach(params): # get dataloader _, _, dataloader_test, _ = get_dataloader(params.tgt_dm, params.batch_size, params.tr, params.n_samples) model_path = params.model_path assert os.path.isfile(model_path) reloaded = torch.load(model_path) binary_slu_tagger = reloaded["binary_slu_tagger"] slotname_predictor = reloaded["slotname_predictor"] binary_slu_tagger.cuda() slotname_predictor.cuda() slu_trainer = SLUTrainer(params, binary_slu_tagger, slotname_predictor) _, f1_score, _ = slu_trainer.evaluate(dataloader_test, istestset=True) print("Eval on test set. Final Slot F1 Score: {:.4f}.".format(f1_score)) def test_baseline(params): # get dataloader _, _, dataloader_test, _ = get_baselineloader(params.tgt_dm, params.batch_size, params.n_samples) model_path = params.model_path assert os.path.isfile(model_path) reloaded = torch.load(model_path) slu_tagger = reloaded["slu_tagger"] slu_tagger.cuda() baseline_trainer = BaselineTrainer(params, slu_tagger) _, f1_score, _ = baseline_trainer.evaluate(0, dataloader_test, istestset=True) print("Eval on test set. Slot F1 Score: {:.4f}.".format(f1_score)) def test_coach_on_seen_and_unseen(params): # read seen and unseen data print("Getting vocabulary ...") _, vocab = datareader(params.tr) print("Processing Unseen and Seen samples in %s domain ..." 
% params.tgt_dm) unseen_data, vocab = read_file("data/snips/"+params.tgt_dm+"/unseen_slots.txt", vocab, False) seen_data, vocab = read_file("data/snips/"+params.tgt_dm+"/seen_slots.txt", vocab, False) print("Binarizing data ...") if len(unseen_data["utter"]) > 0: unseen_data_bin = binarize_data(unseen_data, vocab, params.tgt_dm, False) else: unseen_data_bin = None if len(seen_data["utter"]) > 0: seen_data_bin = binarize_data(seen_data, vocab, params.tgt_dm, False) else: seen_data_bin = None model_path = params.model_path assert os.path.isfile(model_path) reloaded = torch.load(model_path) binary_slu_tagger = reloaded["binary_slu_tagger"] slotname_predictor = reloaded["slotname_predictor"] binary_slu_tagger.cuda() slotname_predictor.cuda() slu_trainer = SLUTrainer(params, binary_slu_tagger, slotname_predictor) print("Prepare dataloader ...") if unseen_data_bin: unseen_dataset = Dataset(unseen_data_bin["utter"], unseen_data_bin["y1"], unseen_data_bin["y2"], unseen_data_bin["domains"]) unseen_dataloader = DataLoader(dataset=unseen_dataset, batch_size=params.batch_size, collate_fn=collate_fn, shuffle=False) _, f1_score, _ = slu_trainer.evaluate(unseen_dataloader, istestset=True) print("Evaluate on {} domain unseen slots. Final slot F1 score: {:.4f}.".format(params.tgt_dm, f1_score)) else: print("Number of unseen sample is zero") if seen_data_bin: seen_dataset = Dataset(seen_data_bin["utter"], seen_data_bin["y1"], seen_data_bin["y2"], seen_data_bin["domains"]) seen_dataloader = DataLoader(dataset=seen_dataset, batch_size=params.batch_size, collate_fn=collate_fn, shuffle=False) _, f1_score, _ = slu_trainer.evaluate(seen_dataloader, istestset=True) print("Evaluate on {} domain seen slots. 
Final slot F1 score: {:.4f}.".format(params.tgt_dm, f1_score)) else: print("Number of seen sample is zero") def test_baseline_on_seen_and_unseen(params): # read seen and unseen data print("Getting vocabulary ...") _, vocab = datareader() print("Processing Unseen and Seen samples in %s domain ..." % params.tgt_dm) unseen_data, vocab = read_file("data/snips/"+params.tgt_dm+"/unseen_slots.txt", vocab, False) seen_data, vocab = read_file("data/snips/"+params.tgt_dm+"/seen_slots.txt", vocab, False) print("Binarizing data ...") if len(unseen_data["utter"]) > 0: unseen_data_bin = binarize_data(unseen_data, vocab, params.tgt_dm, False) else: unseen_data_bin = None if len(seen_data["utter"]) > 0: seen_data_bin = binarize_data(seen_data, vocab, params.tgt_dm, False) else: seen_data_bin = None model_path = params.model_path assert os.path.isfile(model_path) reloaded = torch.load(model_path) slu_tagger = reloaded["slu_tagger"] slu_tagger.cuda() baseline_trainer = BaselineTrainer(params, slu_tagger) print("Prepare dataloader ...") if unseen_data_bin: unseen_dataset = BaselineDataset(unseen_data_bin["utter"], unseen_data_bin["y2"], unseen_data_bin["domains"]) unseen_dataloader = DataLoader(dataset=unseen_dataset, batch_size=params.batch_size, collate_fn=baseline_collate_fn, shuffle=False) _, f1_score, _ = baseline_trainer.evaluate(0, unseen_dataloader, istestset=True) print("Evaluate on {} domain unseen slots. Final slot F1 score: {:.4f}.".format(params.tgt_dm, f1_score)) else: print("Number of unseen sample is zero") if seen_data_bin: seen_dataset = BaselineDataset(seen_data_bin["utter"], seen_data_bin["y2"], seen_data_bin["domains"]) seen_dataloader = DataLoader(dataset=seen_dataset, batch_size=params.batch_size, collate_fn=baseline_collate_fn, shuffle=False) _, f1_score, _ = baseline_trainer.evaluate(0, seen_dataloader, istestset=True) print("Evaluate on {} domain seen slots. 
Final slot F1 score: {:.4f}.".format(params.tgt_dm, f1_score)) else: print("Number of seen sample is zero") if __name__ == "__main__": params = get_params() if params.model_type == "coach": if params.test_mode == "testset": test_coach(params) elif params.test_mode == "seen_unseen": test_coach_on_seen_and_unseen(params) else: if params.test_mode == "testset": test_baseline(params) elif params.test_mode == "seen_unseen": test_baseline_on_seen_and_unseen(params)
from src.utils import init_experiment from src.slu.datareader import datareader, read_file, binarize_data from src.slu.dataloader import get_dataloader, Dataset, DataLoader, collate_fn from src.slu.baseline_loader import get_dataloader as get_baselineloader from src.slu.baseline_loader import collate_fn as baseline_collate_fn from src.slu.baseline_loader import Dataset as BaselineDataset from src.slu.trainer import SLUTrainer from src.slu.baseline_trainer import BaselineTrainer from config import get_params import torch import os def test_coach(params): # get dataloader _, _, dataloader_test, _ = get_dataloader(params.tgt_dm, params.batch_size, params.tr, params.n_samples) model_path = params.model_path assert os.path.isfile(model_path) reloaded = torch.load(model_path) binary_slu_tagger = reloaded["binary_slu_tagger"] slotname_predictor = reloaded["slotname_predictor"] binary_slu_tagger.cuda() slotname_predictor.cuda() slu_trainer = SLUTrainer(params, binary_slu_tagger, slotname_predictor) _, f1_score, _ = slu_trainer.evaluate(dataloader_test, istestset=True) print("Eval on test set. Final Slot F1 Score: {:.4f}.".format(f1_score)) def test_baseline(params): # get dataloader _, _, dataloader_test, _ = get_baselineloader(params.tgt_dm, params.batch_size, params.n_samples) model_path = params.model_path assert os.path.isfile(model_path) reloaded = torch.load(model_path) slu_tagger = reloaded["slu_tagger"] slu_tagger.cuda() baseline_trainer = BaselineTrainer(params, slu_tagger) _, f1_score, _ = baseline_trainer.evaluate(0, dataloader_test, istestset=True) print("Eval on test set. Slot F1 Score: {:.4f}.".format(f1_score)) def test_coach_on_seen_and_unseen(params): # read seen and unseen data print("Getting vocabulary ...") _, vocab = datareader(params.tr) print("Processing Unseen and Seen samples in %s domain ..." 
% params.tgt_dm) unseen_data, vocab = read_file("data/snips/"+params.tgt_dm+"/unseen_slots.txt", vocab, False) seen_data, vocab = read_file("data/snips/"+params.tgt_dm+"/seen_slots.txt", vocab, False) print("Binarizing data ...") if len(unseen_data["utter"]) > 0: unseen_data_bin = binarize_data(unseen_data, vocab, params.tgt_dm, False) else: unseen_data_bin = None if len(seen_data["utter"]) > 0: seen_data_bin = binarize_data(seen_data, vocab, params.tgt_dm, False) else: seen_data_bin = None model_path = params.model_path assert os.path.isfile(model_path) reloaded = torch.load(model_path) binary_slu_tagger = reloaded["binary_slu_tagger"] slotname_predictor = reloaded["slotname_predictor"] binary_slu_tagger.cuda() slotname_predictor.cuda() slu_trainer = SLUTrainer(params, binary_slu_tagger, slotname_predictor) print("Prepare dataloader ...") if unseen_data_bin: unseen_dataset = Dataset(unseen_data_bin["utter"], unseen_data_bin["y1"], unseen_data_bin["y2"], unseen_data_bin["domains"]) unseen_dataloader = DataLoader(dataset=unseen_dataset, batch_size=params.batch_size, collate_fn=collate_fn, shuffle=False) _, f1_score, _ = slu_trainer.evaluate(unseen_dataloader, istestset=True) print("Evaluate on {} domain unseen slots. Final slot F1 score: {:.4f}.".format(params.tgt_dm, f1_score)) else: print("Number of unseen sample is zero") if seen_data_bin: seen_dataset = Dataset(seen_data_bin["utter"], seen_data_bin["y1"], seen_data_bin["y2"], seen_data_bin["domains"]) seen_dataloader = DataLoader(dataset=seen_dataset, batch_size=params.batch_size, collate_fn=collate_fn, shuffle=False) _, f1_score, _ = slu_trainer.evaluate(seen_dataloader, istestset=True) print("Evaluate on {} domain seen slots. 
Final slot F1 score: {:.4f}.".format(params.tgt_dm, f1_score)) else: print("Number of seen sample is zero") def test_baseline_on_seen_and_unseen(params): # read seen and unseen data print("Getting vocabulary ...") _, vocab = datareader() print("Processing Unseen and Seen samples in %s domain ..." % params.tgt_dm) unseen_data, vocab = read_file("data/snips/"+params.tgt_dm+"/unseen_slots.txt", vocab, False) seen_data, vocab = read_file("data/snips/"+params.tgt_dm+"/seen_slots.txt", vocab, False) print("Binarizing data ...") if len(unseen_data["utter"]) > 0: unseen_data_bin = binarize_data(unseen_data, vocab, params.tgt_dm, False) else: unseen_data_bin = None if len(seen_data["utter"]) > 0: seen_data_bin = binarize_data(seen_data, vocab, params.tgt_dm, False) else: seen_data_bin = None model_path = params.model_path assert os.path.isfile(model_path) reloaded = torch.load(model_path) slu_tagger = reloaded["slu_tagger"] slu_tagger.cuda() baseline_trainer = BaselineTrainer(params, slu_tagger) print("Prepare dataloader ...") if unseen_data_bin: unseen_dataset = BaselineDataset(unseen_data_bin["utter"], unseen_data_bin["y2"], unseen_data_bin["domains"]) unseen_dataloader = DataLoader(dataset=unseen_dataset, batch_size=params.batch_size, collate_fn=baseline_collate_fn, shuffle=False) _, f1_score, _ = baseline_trainer.evaluate(0, unseen_dataloader, istestset=True) print("Evaluate on {} domain unseen slots. Final slot F1 score: {:.4f}.".format(params.tgt_dm, f1_score)) else: print("Number of unseen sample is zero") if seen_data_bin: seen_dataset = BaselineDataset(seen_data_bin["utter"], seen_data_bin["y2"], seen_data_bin["domains"]) seen_dataloader = DataLoader(dataset=seen_dataset, batch_size=params.batch_size, collate_fn=baseline_collate_fn, shuffle=False) _, f1_score, _ = baseline_trainer.evaluate(0, seen_dataloader, istestset=True) print("Evaluate on {} domain seen slots. 
Final slot F1 score: {:.4f}.".format(params.tgt_dm, f1_score)) else: print("Number of seen sample is zero") if __name__ == "__main__": params = get_params() if params.model_type == "coach": if params.test_mode == "testset": test_coach(params) elif params.test_mode == "seen_unseen": test_coach_on_seen_and_unseen(params) else: if params.test_mode == "testset": test_baseline(params) elif params.test_mode == "seen_unseen": test_baseline_on_seen_and_unseen(params)
en
0.812102
# get dataloader # get dataloader # read seen and unseen data # read seen and unseen data
2.054438
2
webnotes/model/bean.py
gangadhar-kadam/sapphite_lib
0
6629760
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. # MIT License. See license.txt from __future__ import unicode_literals """ Transactions are defined as collection of classes, a Bean represents collection of Document objects for a transaction with main and children. Group actions like save, etc are performed on doclists """ import webnotes from webnotes import _, msgprint from webnotes.utils import cint, cstr, flt from webnotes.model.doc import Document class DocstatusTransitionError(webnotes.ValidationError): pass class BeanPermissionError(webnotes.ValidationError): pass class TimestampMismatchError(webnotes.ValidationError): pass class Bean: """ Collection of Documents with one parent and multiple children """ def __init__(self, dt=None, dn=None): self.obj = None self.ignore_permissions = False self.ignore_children_type = [] self.ignore_links = False self.ignore_validate = False self.ignore_fields = False self.ignore_mandatory = False if isinstance(dt, basestring) and not dn: dn = dt if dt and dn: self.load_from_db(dt, dn) elif isinstance(dt, list): self.set_doclist(dt) elif isinstance(dt, dict): self.set_doclist([dt]) def load_from_db(self, dt=None, dn=None, prefix='tab'): """ Load doclist from dt """ from webnotes.model.doc import getchildren if not dt: dt = self.doc.doctype if not dn: dn = self.doc.name doc = Document(dt, dn, prefix=prefix) # get all children types tablefields = webnotes.model.meta.get_table_fields(dt) # load chilren doclist = webnotes.doclist([doc,]) for t in tablefields: doclist += getchildren(doc.name, t[0], t[1], dt, prefix=prefix) self.set_doclist(doclist) if dt == dn: self.convert_type(self.doc) def __iter__(self): return self.doclist.__iter__() @property def meta(self): if not hasattr(self, "_meta"): self._meta = webnotes.get_doctype(self.doc.doctype) return self._meta def from_compressed(self, data, docname): from webnotes.model.utils import expand self.set_doclist(expand(data)) def set_doclist(self, doclist): for i, d in 
enumerate(doclist): if isinstance(d, dict): doclist[i] = Document(fielddata=d) self.doclist = webnotes.doclist(doclist) self.doc = self.doclist[0] if self.obj: self.obj.doclist = self.doclist self.obj.doc = self.doc def make_controller(self): if self.obj: # update doclist before running any method self.obj.doclist = self.doclist return self.obj self.obj = webnotes.get_obj(doc=self.doc, doclist=self.doclist) self.obj.bean = self self.controller = self.obj return self.obj def get_controller(self): return self.make_controller() def to_dict(self): return [d.fields for d in self.doclist] def check_if_latest(self, method="save"): from webnotes.model.meta import is_single conflict = False if not cint(self.doc.fields.get('__islocal')): if is_single(self.doc.doctype): modified = webnotes.conn.get_value(self.doc.doctype, self.doc.name, "modified") if isinstance(modified, list): modified = modified[0] if cstr(modified) and cstr(modified) != cstr(self.doc.modified): conflict = True else: tmp = webnotes.conn.sql("""select modified, docstatus from `tab%s` where name="%s" for update""" % (self.doc.doctype, self.doc.name), as_dict=True) if not tmp: webnotes.msgprint("""This record does not exist. Please refresh.""", raise_exception=1) modified = cstr(tmp[0].modified) if modified and modified != cstr(self.doc.modified): conflict = True self.check_docstatus_transition(tmp[0].docstatus, method) if conflict: webnotes.msgprint(_("Error: Document has been modified after you have opened it") \ + (" (%s, %s). 
" % (modified, self.doc.modified)) \ + _("Please refresh to get the latest document."), raise_exception=TimestampMismatchError) def check_docstatus_transition(self, db_docstatus, method): valid = { "save": [0,0], "submit": [0,1], "cancel": [1,2], "update_after_submit": [1,1] } labels = { 0: _("Draft"), 1: _("Submitted"), 2: _("Cancelled") } if not hasattr(self, "to_docstatus"): self.to_docstatus = 0 if method != "runserverobj" and [db_docstatus, self.to_docstatus] != valid[method]: webnotes.msgprint(_("Cannot change from") + ": " + labels[db_docstatus] + " > " + \ labels[self.to_docstatus], raise_exception=DocstatusTransitionError) def check_links(self): if self.ignore_links: return ref, err_list = {}, [] for d in self.doclist: if not ref.get(d.doctype): ref[d.doctype] = d.make_link_list() err_list += d.validate_links(ref[d.doctype]) if err_list: webnotes.msgprint("""[Link Validation] Could not find the following values: %s. Please correct and resave. Document Not Saved.""" % ', '.join(err_list), raise_exception=1) def update_timestamps_and_docstatus(self): from webnotes.utils import now ts = now() user = webnotes.__dict__.get('session', {}).get('user') or 'Administrator' for d in self.doclist: if self.doc.fields.get('__islocal'): if not d.owner: d.owner = user if not d.creation: d.creation = ts d.modified_by = user d.modified = ts if d.docstatus != 2 and self.to_docstatus >= int(d.docstatus): # don't update deleted d.docstatus = self.to_docstatus def prepare_for_save(self, method): self.check_if_latest(method) if method != "cancel": self.check_links() self.update_timestamps_and_docstatus() self.update_parent_info() def update_parent_info(self): idx_map = {} is_local = cint(self.doc.fields.get("__islocal")) if not webnotes.in_import: parentfields = [d.fieldname for d in self.meta.get({"doctype": "DocField", "fieldtype": "Table"})] for i, d in enumerate(self.doclist[1:]): if d.parentfield: if not webnotes.in_import: if not d.parentfield in parentfields: 
webnotes.msgprint("Bad parentfield %s" % d.parentfield, raise_exception=True) d.parenttype = self.doc.doctype d.parent = self.doc.name if not d.idx: d.idx = idx_map.setdefault(d.parentfield, 0) + 1 else: d.idx = cint(d.idx) if is_local: # if parent is new, all children should be new d.fields["__islocal"] = 1 d.name = None idx_map[d.parentfield] = d.idx def run_method(self, method, *args, **kwargs): self.make_controller() if hasattr(self.controller, method): getattr(self.controller, method)(*args, **kwargs) if hasattr(self.controller, 'custom_' + method): getattr(self.controller, 'custom_' + method)(*args, **kwargs) notify(self.controller, method) self.set_doclist(self.controller.doclist) def get_method(self, method): self.make_controller() return getattr(self.controller, method, None) def save_main(self): try: self.doc.save(check_links = False, ignore_fields = self.ignore_fields) except NameError, e: webnotes.msgprint('%s "%s" already exists' % (self.doc.doctype, self.doc.name)) # prompt if cancelled if webnotes.conn.get_value(self.doc.doctype, self.doc.name, 'docstatus')==2: webnotes.msgprint('[%s "%s" has been cancelled]' % (self.doc.doctype, self.doc.name)) webnotes.errprint(webnotes.utils.getTraceback()) raise e def save_children(self): child_map = {} for d in self.doclist[1:]: if d.fields.get("parent") or d.fields.get("parentfield"): d.parent = self.doc.name # rename if reqd d.parenttype = self.doc.doctype d.save(check_links=False, ignore_fields = self.ignore_fields) child_map.setdefault(d.doctype, []).append(d.name) # delete all children in database that are not in the child_map # get all children types tablefields = webnotes.model.meta.get_table_fields(self.doc.doctype) for dt in tablefields: if dt[0] not in self.ignore_children_type: cnames = child_map.get(dt[0]) or [] if cnames: webnotes.conn.sql("""delete from `tab%s` where parent=%s and parenttype=%s and name not in (%s)""" % (dt[0], '%s', '%s', ','.join(['%s'] * len(cnames))), tuple([self.doc.name, 
self.doc.doctype] + cnames)) else: webnotes.conn.sql("""delete from `tab%s` where parent=%s and parenttype=%s""" \ % (dt[0], '%s', '%s'), (self.doc.name, self.doc.doctype)) def insert(self): self.doc.fields["__islocal"] = 1 self.set_defaults() if webnotes.in_test: if self.meta.get_field("naming_series"): self.doc.naming_series = "_T-" + self.doc.doctype + "-" return self.save() def insert_or_update(self): if webnotes.conn.exists( self.doc.doctype, self.doc.name): return self.save() else: return self.insert() def set_defaults(self): if webnotes.in_import: return new_docs = {} new_doclist = [] for d in self.doclist: if not d.doctype in new_docs: new_docs[d.doctype] = webnotes.new_doc(d.doctype) newd = webnotes.doc(new_docs[d.doctype].fields.copy()) newd.fields.update(d.fields) new_doclist.append(newd) self.set_doclist(new_doclist) def has_read_perm(self): return webnotes.has_permission(self.doc.doctype, "read", self.doc) def save(self, check_links=1): perm_to_check = "write" if self.doc.fields.get("__islocal"): perm_to_check = "create" if not self.doc.owner: self.doc.owner = webnotes.session.user if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, perm_to_check, self.doc): self.to_docstatus = 0 self.prepare_for_save("save") if not self.ignore_validate: self.run_method('validate') if not self.ignore_mandatory: self.check_mandatory() self.save_main() self.save_children() self.run_method('on_update') else: self.no_permission_to(_(perm_to_check.title())) return self def submit(self): if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, "submit", self.doc): self.to_docstatus = 1 self.prepare_for_save("submit") self.run_method('validate') self.check_mandatory() self.save_main() self.save_children() self.run_method('on_update') self.run_method('on_submit') else: self.no_permission_to(_("Submit")) return self def cancel(self): if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, "cancel", self.doc): self.to_docstatus 
= 2 self.prepare_for_save("cancel") self.run_method('before_cancel') self.save_main() self.save_children() self.run_method('on_cancel') self.check_no_back_links_exist() else: self.no_permission_to(_("Cancel")) return self def update_after_submit(self): if self.doc.docstatus != 1: webnotes.msgprint("Only to called after submit", raise_exception=1) if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, "write", self.doc): self.to_docstatus = 1 self.prepare_for_save("update_after_submit") self.run_method('before_update_after_submit') self.save_main() self.save_children() self.run_method('on_update_after_submit') else: self.no_permission_to(_("Update")) return self def delete(self): webnotes.delete_doc(self.doc.doctype, self.doc.name) def no_permission_to(self, ptype): webnotes.msgprint(("%s (%s): " % (self.doc.name, _(self.doc.doctype))) + \ _("No Permission to ") + ptype, raise_exception=BeanPermissionError) def check_no_back_links_exist(self): from webnotes.model.utils import check_if_doc_is_linked check_if_doc_is_linked(self.doc.doctype, self.doc.name, method="Cancel") def check_mandatory(self): missing = [] for doc in self.doclist: for df in self.meta: if df.doctype=="DocField" and df.reqd and df.parent==doc.doctype and df.fieldname!="naming_series": msg = "" if df.fieldtype == "Table": if not self.doclist.get({"parentfield": df.fieldname}): msg = _("Error") + ": " + _("Data missing in table") + ": " + _(df.label) elif doc.fields.get(df.fieldname) is None: msg = _("Error") + ": " if doc.parentfield: msg += _("Row") + (" # %s: " % (doc.idx,)) msg += _("Value missing for") + ": " + _(df.label) if msg: missing.append([msg, df.fieldname]) if missing: for msg, fieldname in missing: msgprint(msg) raise webnotes.MandatoryError, ", ".join([fieldname for msg, fieldname in missing]) def convert_type(self, doc): if doc.doctype==doc.name and doc.doctype!="DocType": for df in self.meta.get({"doctype": "DocField", "parent": doc.doctype}): if df.fieldtype in 
("Int", "Check"): doc.fields[df.fieldname] = cint(doc.fields.get(df.fieldname)) elif df.fieldtype in ("Float", "Currency"): doc.fields[df.fieldname] = flt(doc.fields.get(df.fieldname)) doc.docstatus = cint(doc.docstatus) def clone(source_wrapper): """ make a clone of a document""" if isinstance(source_wrapper, list): source_wrapper = Bean(source_wrapper) new_wrapper = Bean(source_wrapper.doclist.copy()) if new_wrapper.doc.fields.get("amended_from"): new_wrapper.doc.fields["amended_from"] = None if new_wrapper.doc.fields.get("amendment_date"): new_wrapper.doc.fields["amendment_date"] = None for d in new_wrapper.doclist: d.fields.update({ "name": None, "__islocal": 1, "docstatus": 0, }) return new_wrapper def notify(controller, caller_method): try: from startup.observers import observer_map except ImportError: return doctype = controller.doc.doctype def call_observers(key): if key in observer_map: observer_list = observer_map[key] if isinstance(observer_list, basestring): observer_list = [observer_list] for observer_method in observer_list: webnotes.get_method(observer_method)(controller, caller_method) call_observers("*:*") call_observers(doctype + ":*") call_observers("*:" + caller_method) call_observers(doctype + ":" + caller_method) # for bc def getlist(doclist, parentfield): """ Return child records of a particular type """ import webnotes.model.utils return webnotes.model.utils.getlist(doclist, parentfield) def copy_doclist(doclist, no_copy = []): """ Make a copy of the doclist """ import webnotes.model.utils return webnotes.model.utils.copy_doclist(doclist, no_copy)
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. # MIT License. See license.txt from __future__ import unicode_literals """ Transactions are defined as collection of classes, a Bean represents collection of Document objects for a transaction with main and children. Group actions like save, etc are performed on doclists """ import webnotes from webnotes import _, msgprint from webnotes.utils import cint, cstr, flt from webnotes.model.doc import Document class DocstatusTransitionError(webnotes.ValidationError): pass class BeanPermissionError(webnotes.ValidationError): pass class TimestampMismatchError(webnotes.ValidationError): pass class Bean: """ Collection of Documents with one parent and multiple children """ def __init__(self, dt=None, dn=None): self.obj = None self.ignore_permissions = False self.ignore_children_type = [] self.ignore_links = False self.ignore_validate = False self.ignore_fields = False self.ignore_mandatory = False if isinstance(dt, basestring) and not dn: dn = dt if dt and dn: self.load_from_db(dt, dn) elif isinstance(dt, list): self.set_doclist(dt) elif isinstance(dt, dict): self.set_doclist([dt]) def load_from_db(self, dt=None, dn=None, prefix='tab'): """ Load doclist from dt """ from webnotes.model.doc import getchildren if not dt: dt = self.doc.doctype if not dn: dn = self.doc.name doc = Document(dt, dn, prefix=prefix) # get all children types tablefields = webnotes.model.meta.get_table_fields(dt) # load chilren doclist = webnotes.doclist([doc,]) for t in tablefields: doclist += getchildren(doc.name, t[0], t[1], dt, prefix=prefix) self.set_doclist(doclist) if dt == dn: self.convert_type(self.doc) def __iter__(self): return self.doclist.__iter__() @property def meta(self): if not hasattr(self, "_meta"): self._meta = webnotes.get_doctype(self.doc.doctype) return self._meta def from_compressed(self, data, docname): from webnotes.model.utils import expand self.set_doclist(expand(data)) def set_doclist(self, doclist): for i, d in 
enumerate(doclist): if isinstance(d, dict): doclist[i] = Document(fielddata=d) self.doclist = webnotes.doclist(doclist) self.doc = self.doclist[0] if self.obj: self.obj.doclist = self.doclist self.obj.doc = self.doc def make_controller(self): if self.obj: # update doclist before running any method self.obj.doclist = self.doclist return self.obj self.obj = webnotes.get_obj(doc=self.doc, doclist=self.doclist) self.obj.bean = self self.controller = self.obj return self.obj def get_controller(self): return self.make_controller() def to_dict(self): return [d.fields for d in self.doclist] def check_if_latest(self, method="save"): from webnotes.model.meta import is_single conflict = False if not cint(self.doc.fields.get('__islocal')): if is_single(self.doc.doctype): modified = webnotes.conn.get_value(self.doc.doctype, self.doc.name, "modified") if isinstance(modified, list): modified = modified[0] if cstr(modified) and cstr(modified) != cstr(self.doc.modified): conflict = True else: tmp = webnotes.conn.sql("""select modified, docstatus from `tab%s` where name="%s" for update""" % (self.doc.doctype, self.doc.name), as_dict=True) if not tmp: webnotes.msgprint("""This record does not exist. Please refresh.""", raise_exception=1) modified = cstr(tmp[0].modified) if modified and modified != cstr(self.doc.modified): conflict = True self.check_docstatus_transition(tmp[0].docstatus, method) if conflict: webnotes.msgprint(_("Error: Document has been modified after you have opened it") \ + (" (%s, %s). 
" % (modified, self.doc.modified)) \ + _("Please refresh to get the latest document."), raise_exception=TimestampMismatchError) def check_docstatus_transition(self, db_docstatus, method): valid = { "save": [0,0], "submit": [0,1], "cancel": [1,2], "update_after_submit": [1,1] } labels = { 0: _("Draft"), 1: _("Submitted"), 2: _("Cancelled") } if not hasattr(self, "to_docstatus"): self.to_docstatus = 0 if method != "runserverobj" and [db_docstatus, self.to_docstatus] != valid[method]: webnotes.msgprint(_("Cannot change from") + ": " + labels[db_docstatus] + " > " + \ labels[self.to_docstatus], raise_exception=DocstatusTransitionError) def check_links(self): if self.ignore_links: return ref, err_list = {}, [] for d in self.doclist: if not ref.get(d.doctype): ref[d.doctype] = d.make_link_list() err_list += d.validate_links(ref[d.doctype]) if err_list: webnotes.msgprint("""[Link Validation] Could not find the following values: %s. Please correct and resave. Document Not Saved.""" % ', '.join(err_list), raise_exception=1) def update_timestamps_and_docstatus(self): from webnotes.utils import now ts = now() user = webnotes.__dict__.get('session', {}).get('user') or 'Administrator' for d in self.doclist: if self.doc.fields.get('__islocal'): if not d.owner: d.owner = user if not d.creation: d.creation = ts d.modified_by = user d.modified = ts if d.docstatus != 2 and self.to_docstatus >= int(d.docstatus): # don't update deleted d.docstatus = self.to_docstatus def prepare_for_save(self, method): self.check_if_latest(method) if method != "cancel": self.check_links() self.update_timestamps_and_docstatus() self.update_parent_info() def update_parent_info(self): idx_map = {} is_local = cint(self.doc.fields.get("__islocal")) if not webnotes.in_import: parentfields = [d.fieldname for d in self.meta.get({"doctype": "DocField", "fieldtype": "Table"})] for i, d in enumerate(self.doclist[1:]): if d.parentfield: if not webnotes.in_import: if not d.parentfield in parentfields: 
webnotes.msgprint("Bad parentfield %s" % d.parentfield, raise_exception=True) d.parenttype = self.doc.doctype d.parent = self.doc.name if not d.idx: d.idx = idx_map.setdefault(d.parentfield, 0) + 1 else: d.idx = cint(d.idx) if is_local: # if parent is new, all children should be new d.fields["__islocal"] = 1 d.name = None idx_map[d.parentfield] = d.idx def run_method(self, method, *args, **kwargs): self.make_controller() if hasattr(self.controller, method): getattr(self.controller, method)(*args, **kwargs) if hasattr(self.controller, 'custom_' + method): getattr(self.controller, 'custom_' + method)(*args, **kwargs) notify(self.controller, method) self.set_doclist(self.controller.doclist) def get_method(self, method): self.make_controller() return getattr(self.controller, method, None) def save_main(self): try: self.doc.save(check_links = False, ignore_fields = self.ignore_fields) except NameError, e: webnotes.msgprint('%s "%s" already exists' % (self.doc.doctype, self.doc.name)) # prompt if cancelled if webnotes.conn.get_value(self.doc.doctype, self.doc.name, 'docstatus')==2: webnotes.msgprint('[%s "%s" has been cancelled]' % (self.doc.doctype, self.doc.name)) webnotes.errprint(webnotes.utils.getTraceback()) raise e def save_children(self): child_map = {} for d in self.doclist[1:]: if d.fields.get("parent") or d.fields.get("parentfield"): d.parent = self.doc.name # rename if reqd d.parenttype = self.doc.doctype d.save(check_links=False, ignore_fields = self.ignore_fields) child_map.setdefault(d.doctype, []).append(d.name) # delete all children in database that are not in the child_map # get all children types tablefields = webnotes.model.meta.get_table_fields(self.doc.doctype) for dt in tablefields: if dt[0] not in self.ignore_children_type: cnames = child_map.get(dt[0]) or [] if cnames: webnotes.conn.sql("""delete from `tab%s` where parent=%s and parenttype=%s and name not in (%s)""" % (dt[0], '%s', '%s', ','.join(['%s'] * len(cnames))), tuple([self.doc.name, 
self.doc.doctype] + cnames)) else: webnotes.conn.sql("""delete from `tab%s` where parent=%s and parenttype=%s""" \ % (dt[0], '%s', '%s'), (self.doc.name, self.doc.doctype)) def insert(self): self.doc.fields["__islocal"] = 1 self.set_defaults() if webnotes.in_test: if self.meta.get_field("naming_series"): self.doc.naming_series = "_T-" + self.doc.doctype + "-" return self.save() def insert_or_update(self): if webnotes.conn.exists( self.doc.doctype, self.doc.name): return self.save() else: return self.insert() def set_defaults(self): if webnotes.in_import: return new_docs = {} new_doclist = [] for d in self.doclist: if not d.doctype in new_docs: new_docs[d.doctype] = webnotes.new_doc(d.doctype) newd = webnotes.doc(new_docs[d.doctype].fields.copy()) newd.fields.update(d.fields) new_doclist.append(newd) self.set_doclist(new_doclist) def has_read_perm(self): return webnotes.has_permission(self.doc.doctype, "read", self.doc) def save(self, check_links=1): perm_to_check = "write" if self.doc.fields.get("__islocal"): perm_to_check = "create" if not self.doc.owner: self.doc.owner = webnotes.session.user if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, perm_to_check, self.doc): self.to_docstatus = 0 self.prepare_for_save("save") if not self.ignore_validate: self.run_method('validate') if not self.ignore_mandatory: self.check_mandatory() self.save_main() self.save_children() self.run_method('on_update') else: self.no_permission_to(_(perm_to_check.title())) return self def submit(self): if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, "submit", self.doc): self.to_docstatus = 1 self.prepare_for_save("submit") self.run_method('validate') self.check_mandatory() self.save_main() self.save_children() self.run_method('on_update') self.run_method('on_submit') else: self.no_permission_to(_("Submit")) return self def cancel(self): if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, "cancel", self.doc): self.to_docstatus 
= 2 self.prepare_for_save("cancel") self.run_method('before_cancel') self.save_main() self.save_children() self.run_method('on_cancel') self.check_no_back_links_exist() else: self.no_permission_to(_("Cancel")) return self def update_after_submit(self): if self.doc.docstatus != 1: webnotes.msgprint("Only to called after submit", raise_exception=1) if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, "write", self.doc): self.to_docstatus = 1 self.prepare_for_save("update_after_submit") self.run_method('before_update_after_submit') self.save_main() self.save_children() self.run_method('on_update_after_submit') else: self.no_permission_to(_("Update")) return self def delete(self): webnotes.delete_doc(self.doc.doctype, self.doc.name) def no_permission_to(self, ptype): webnotes.msgprint(("%s (%s): " % (self.doc.name, _(self.doc.doctype))) + \ _("No Permission to ") + ptype, raise_exception=BeanPermissionError) def check_no_back_links_exist(self): from webnotes.model.utils import check_if_doc_is_linked check_if_doc_is_linked(self.doc.doctype, self.doc.name, method="Cancel") def check_mandatory(self): missing = [] for doc in self.doclist: for df in self.meta: if df.doctype=="DocField" and df.reqd and df.parent==doc.doctype and df.fieldname!="naming_series": msg = "" if df.fieldtype == "Table": if not self.doclist.get({"parentfield": df.fieldname}): msg = _("Error") + ": " + _("Data missing in table") + ": " + _(df.label) elif doc.fields.get(df.fieldname) is None: msg = _("Error") + ": " if doc.parentfield: msg += _("Row") + (" # %s: " % (doc.idx,)) msg += _("Value missing for") + ": " + _(df.label) if msg: missing.append([msg, df.fieldname]) if missing: for msg, fieldname in missing: msgprint(msg) raise webnotes.MandatoryError, ", ".join([fieldname for msg, fieldname in missing]) def convert_type(self, doc): if doc.doctype==doc.name and doc.doctype!="DocType": for df in self.meta.get({"doctype": "DocField", "parent": doc.doctype}): if df.fieldtype in 
("Int", "Check"): doc.fields[df.fieldname] = cint(doc.fields.get(df.fieldname)) elif df.fieldtype in ("Float", "Currency"): doc.fields[df.fieldname] = flt(doc.fields.get(df.fieldname)) doc.docstatus = cint(doc.docstatus) def clone(source_wrapper): """ make a clone of a document""" if isinstance(source_wrapper, list): source_wrapper = Bean(source_wrapper) new_wrapper = Bean(source_wrapper.doclist.copy()) if new_wrapper.doc.fields.get("amended_from"): new_wrapper.doc.fields["amended_from"] = None if new_wrapper.doc.fields.get("amendment_date"): new_wrapper.doc.fields["amendment_date"] = None for d in new_wrapper.doclist: d.fields.update({ "name": None, "__islocal": 1, "docstatus": 0, }) return new_wrapper def notify(controller, caller_method): try: from startup.observers import observer_map except ImportError: return doctype = controller.doc.doctype def call_observers(key): if key in observer_map: observer_list = observer_map[key] if isinstance(observer_list, basestring): observer_list = [observer_list] for observer_method in observer_list: webnotes.get_method(observer_method)(controller, caller_method) call_observers("*:*") call_observers(doctype + ":*") call_observers("*:" + caller_method) call_observers(doctype + ":" + caller_method) # for bc def getlist(doclist, parentfield): """ Return child records of a particular type """ import webnotes.model.utils return webnotes.model.utils.getlist(doclist, parentfield) def copy_doclist(doclist, no_copy = []): """ Make a copy of the doclist """ import webnotes.model.utils return webnotes.model.utils.copy_doclist(doclist, no_copy)
en
0.848497
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. # MIT License. See license.txt Transactions are defined as collection of classes, a Bean represents collection of Document objects for a transaction with main and children. Group actions like save, etc are performed on doclists Collection of Documents with one parent and multiple children Load doclist from dt # get all children types # load chilren # update doclist before running any method select modified, docstatus from `tab%s` where name="%s" for update This record does not exist. Please refresh. [Link Validation] Could not find the following values: %s. Please correct and resave. Document Not Saved. # don't update deleted # if parent is new, all children should be new # prompt if cancelled # rename if reqd # delete all children in database that are not in the child_map # get all children types delete from `tab%s` where parent=%s and parenttype=%s and name not in (%s) delete from `tab%s` where parent=%s and parenttype=%s # %s: " % (doc.idx,)) make a clone of a document # for bc Return child records of a particular type Make a copy of the doclist
1.983729
2
part2.py
TanishqGoel/ValueIteration
0
6629761
import numpy as np from copy import deepcopy from functools import reduce import sys from operator import add HashArr=["C","N","E","S","W"] HashArr1=["R","D"] NewUtil=np.NINF BestAction="NULL" HEALTH_RANGE = 5 ARROWS_RANGE = 4 MATERIALS_RANGE=3 POSITION_RANGE=5 MONSTER_STATES_RANGE=2 ACTION_RANGE=10 HEALTH_VALUES = tuple(range(HEALTH_RANGE)) ARROWS_VALUES = tuple(range(ARROWS_RANGE)) MATERIALS_VALUES = tuple(range(MATERIALS_RANGE)) POSITION_VALUES = tuple(range(POSITION_RANGE)) MONSTER_STATE_VALUES=tuple(range(MONSTER_STATES_RANGE)) ACTION_VALUES=tuple(range(ACTION_RANGE)) #print(ACTION_VALUES) HEALTH_FACTOR = 25 # 0, 25, 50, 75, 100 ARROWS_FACTOR = 1 # 0, 1, 2, 3 MATERIALS_FACTOR = 1 # 0, 1, 2 POSITION_FACTOR=1 # 0, 1, 2, 3, 4 MONSTER_STATES_FACTOR=1 #0: Ready, 1: Dormant ACTION_FACTOR=1 ACTION_SHOOT = 0 ACTION_HIT = 1 ACTION_UP=2 ACTION_DOWN=3 ACTION_RIGHT=4 ACTION_LEFT=5 ACTION_STAY=6 ACTION_GATHER=7 ACTION_CRAFT=8 ACTION_STAY=9 TEAM = 34 Y = [1/2, 1,2] PRIZE = 50 COST = -10/Y[TEAM%3] #COST=-10 GAMMA = 0.999 DELTA = 0.001 # Center=0, North=1, East=2, South=3, West=4 utilities = np.zeros((HEALTH_RANGE, ARROWS_RANGE, MATERIALS_RANGE, POSITION_RANGE,MONSTER_STATES_RANGE)) policies = np.full((HEALTH_RANGE, ARROWS_RANGE, MATERIALS_RANGE, POSITION_RANGE, MONSTER_STATES_RANGE), -1, dtype='int') temp=np.zeros(utilities.shape) orig_stdout = sys.stdout f = open("./outputs/part_2_trace.txt", "w") sys.stdout = f def value_iteration(): global utilities index = 0 while True: # one iteration of value iteration delta = 0 # temp=np.zeros(utilities.shape) for l in range(0,5): for k in range(0,3): for j in range(0,4): for m in range(0,2): for i in range (0,5): stt=State(i,j,k,l,m) ANS=action(stt) temp[stt.show()],policies[stt.show()]=action(stt) delta=max(delta, abs(temp[stt.show()]-utilities[stt.show()])) utilities=deepcopy(temp) trace(index, utilities, policies) index +=1 if delta <= DELTA: break class State: def __init__(self, enemy_health, 
num_arrows,num_materials,num_position,monster_state): if (enemy_health not in HEALTH_VALUES) or (num_arrows not in ARROWS_VALUES) or (num_materials not in MATERIALS_VALUES) or (num_position not in POSITION_VALUES) or (monster_state not in MONSTER_STATE_VALUES) : print(enemy_health,num_arrows,num_materials,num_position,monster_state) raise ValueError self.health = enemy_health self.arrows = num_arrows self.materials = num_materials self.position = num_position self.monsterState = monster_state def show(self): return (self.health, self.arrows, self.materials, self.position, self.monsterState) def __str__(self): return f'({self.health},{self.arrows},{self.materials},{self.position},{self.monsterState})' REWARD = np.zeros((HEALTH_RANGE, ARROWS_RANGE, MATERIALS_RANGE, POSITION_RANGE, MONSTER_STATES_RANGE)) REWARD[0, :, :, :, :] = PRIZE def action(state): # returns cost, array of tuple of (probability, state) # state = State(*state) NewUtil=np.NINF BestAction=-2 DownCost=np.NINF UpCost=np.NINF LeftCost=np.NINF RightCost=np.NINF StayCost=np.NINF CraftCost=np.NINF HitCost=np.NINF ShootCost=np.NINF GatherCost=np.NINF if(state.health==0): return 0, -1 #"NONE" if(state.position==0): #Pos::center #Availaible actions : Up, Down, Right, Left, None, shoot, stay, hit #Move Down if(state.monsterState==1): #Dormant state state1= State( state.health , state.arrows, state.materials,3,1) #MM stays dormant : Success of Action state2= State( state.health , state.arrows, state.materials,3,0) #MM becomes ready : Success of Action state3= State( state.health , state.arrows, state.materials,2,1) #MM stays dormant : Failure of Action state4= State( state.health , state.arrows, state.materials,2,0) #MM becomes ready : Failure of Action DownCost=( 0.85 * 0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.15 * 0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.2 *(COST 
+ REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state state1= State( state.health , state.arrows, state.materials,3,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL state3= State( state.health , state.arrows, state.materials,2,0) #MM stays ready : Failure of Action state4= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL DownCost= (0.85 * 0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()]) + 0.15 * 0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.5 *(COST + REWARD[state4.show()] -40 + GAMMA*utilities[state4.show()])) #Move Up if(state.monsterState==1): #Dormant state state1= State( state.health , state.arrows, state.materials,1,1) #MM stays dormant : Success of Action state2= State( state.health , state.arrows, state.materials,1,0) #MM becomes ready : Success of Action state3= State( state.health , state.arrows, state.materials,2,1) #MM stays dormant : Failure of Action state4= State( state.health , state.arrows, state.materials,2,0) #MM becomes ready : Failure of Action UpCost= (0.85 * 0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.15 * 0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.2 *(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state state1= State( state.health , state.arrows, state.materials,1,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL state3= State( state.health , state.arrows, state.materials,2,0) #MM stays ready : Failure of Action state4= 
State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL UpCost=( 0.85 * 0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()]) + 0.15 * 0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.5 *(COST + REWARD[state4.show()] -40 + GAMMA*utilities[state4.show()])) #Move Right if(state.monsterState==1): #Dormant state state1= State( state.health , state.arrows, state.materials,2,1) #MM stays dormant : Success of Action state2= State( state.health , state.arrows, state.materials,2,0) #MM becomes ready : Success of Action state3= State( state.health , state.arrows, state.materials,2,1) #MM stays dormant : Failure of Action state4= State( state.health , state.arrows, state.materials,2,0) #MM becomes ready : Failure of Action RightCost= (0.85 * 0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.15 * 0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.2 *(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state state1= State( state.health , state.arrows, state.materials,2,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL state3= State( state.health , state.arrows, state.materials,2,0) #MM stays ready : Failure of Action state4= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL RightCost= (0.85 * 0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()]) + 0.15 * 0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.5 *(COST + REWARD[state4.show()] -40 + 
GAMMA*utilities[state4.show()])) #Move Left if(state.monsterState==1): #Dormant state state1= State( state.health , state.arrows, state.materials,4,1) #MM stays dormant : Success of Action state2= State( state.health , state.arrows, state.materials,4,0) #MM becomes ready : Success of Action state3= State( state.health , state.arrows, state.materials,2,1) #MM stays dormant : Failure of Action state4= State( state.health , state.arrows, state.materials,2,0) #MM becomes ready : Failure of Action LeftCost= (0.85 * 0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.15 * 0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.2 *(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state state1= State( state.health , state.arrows, state.materials,4,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL state3= State( state.health , state.arrows, state.materials,2,0) #MM stays ready : Failure of Action state4= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL LeftCost= (0.85 * 0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()]) + 0.15 * 0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.5 *(COST + REWARD[state4.show()] -40 + GAMMA*utilities[state4.show()])) #STAY if(state.monsterState==1): #Dormant state state1= State( state.health , state.arrows, state.materials,0,1) #MM stays dormant : Success of Action state2= State( state.health , state.arrows, state.materials,0,0) #MM becomes ready : Success of Action state3= State( state.health , state.arrows, state.materials,2,1) #MM stays dormant : Failure of Action state4= State( state.health , 
state.arrows, state.materials,2,0) #MM becomes ready : Failure of Action StayCost= (0.85 * 0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.15 * 0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.2 *(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state state1= State( state.health , state.arrows, state.materials,0,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL state3= State( state.health , state.arrows, state.materials,2,0) #MM stays ready : Failure of Action state4= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL StayCost= (0.85 * 0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.85 * 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()]) + 0.15 * 0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15 * 0.5 *(COST + REWARD[state4.show()] -40 + GAMMA*utilities[state4.show()])) #SHOOT if(state.arrows>=1): if(state.monsterState==1): #Dormant state state1= State( max(state.health-1,0) , state.arrows-1, state.materials,0,1) #MM stays dormant : Success of Action state2= State( max(state.health-1,0) , state.arrows-1, state.materials,0,0) #MM becomes ready : Success of Action state3= State( state.health, state.arrows-1, state.materials,0,1) #MM stays dormant : Failure of Action state4= State( state.health , state.arrows-1, state.materials,0,0) #MM becomes ready : Failure of Action ShootCost= (0.5 * 0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.5 * 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.5 * 0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.5 * 0.2 *(COST + REWARD[state4.show()] + 
GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state state1= State( max(state.health-1,0) , state.arrows-1, state.materials,0,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL state3= State( state.health , state.arrows-1, state.materials,0,0) #MM stays ready : Failure of Action state4= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL ShootCost= (0.5 * 0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.5 * 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()]) + 0.5 * 0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.5 * 0.5 *(COST + REWARD[state4.show()] -40 + GAMMA*utilities[state4.show()])) #HIT if(state.monsterState==1): #Dormant state state1= State( max(state.health-2,0) , state.arrows, state.materials,0,1) #MM stays dormant : Success of Action state2= State( max(state.health-2,0) , state.arrows, state.materials,0,0) #MM becomes ready : Success of Action state3= State( state.health, state.arrows, state.materials,0,1) #MM stays dormant : Failure of Action state4= State( state.health , state.arrows, state.materials,0,0) #MM becomes ready : Failure of Action HitCost= (0.1 * 0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.1 * 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.9 * 0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.9 * 0.2 *(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state state1= State( max(state.health-2,0) , state.arrows, state.materials,0,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL state3= State( state.health , max(state.arrows-1,0), state.materials,0,0) #MM stays ready : Failure of Action 
state4= State( min(state.health+1,4) , 0, state.materials,0,1) #MM attacks and become dormant : UNSUCCESSFUL HitCost= (0.1 * 0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + ( 0.1 ) * 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()]) + 0.9 * 0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + ( 0.9 ) * 0.5 *(COST + REWARD[state4.show()] -40 + GAMMA*utilities[state4.show()])) elif(state.position==1): #Pos::North #Availaible actions : Down, None, Craft, stay #Move Down if(state.monsterState==1): #Dormant state1= State( state.health , state.arrows, state.materials,0,1) #Success of Action : Stays in Dormant state2= State( state.health , state.arrows, state.materials,2,1) #Failure of Action : Stays in Dormant state3= State( state.health , state.arrows, state.materials,0,0) #Success of Action : Becomes ready state4= State( state.health , state.arrows, state.materials,2,0) #Failure of Action : Becomes ready DownCost=(0.85*0.8*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.15*0.8*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.85*0.2*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15*0.2*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state1= State( state.health , state.arrows, state.materials,0,0) #Success of Action : Stays Ready state2= State( state.health , state.arrows, state.materials,2,0) #Failure of Action : Stays Ready state3= State( state.health , state.arrows, state.materials,0,1) #Success of Action : Attack state4= State( state.health , state.arrows, state.materials,2,1) #Failure of Action : Attack DownCost=(0.85*0.5*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.15*0.5*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.85*0.5*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15*0.5*(COST + REWARD[state4.show()] + 
GAMMA*utilities[state4.show()])) #STAY if(state.monsterState==1): #Dormant state1= State( state.health , state.arrows, state.materials,1,1) #Success of Action : Stays in Dormant state2= State( state.health , state.arrows, state.materials,2,1) #Failure of Action : Stays in Dormant state3= State( state.health , state.arrows, state.materials,1,0) #Success of Action : Becomes ready state4= State( state.health , state.arrows, state.materials,2,0) #Failure of Action : Becomes ready StayCost= (0.85*0.8*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.15*0.8*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.85*0.2*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15*0.2*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state1= State( state.health , state.arrows, state.materials,1,0) #Success of Action : Stays Ready state2= State( state.health , state.arrows, state.materials,2,0) #Failure of Action : Stays Ready state3= State( state.health , state.arrows, state.materials,1,1) #Success of Action : Attack state4= State( state.health , state.arrows, state.materials,2,1) #Failure of Action : Attack StayCost= (0.85*0.5*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.15*0.5*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.85*0.5*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15*0.5*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) #Craft if(state.materials>=1): if(state.monsterState==1): #Dormant state1= State( state.health , min(3,state.arrows+1), state.materials-1,1,1) state2= State( state.health , min(3,state.arrows+2), state.materials-1,1,1) state3= State( state.health , min(3,state.arrows+3), state.materials-1,1,1) state4= State( state.health , min(3,state.arrows+1), state.materials-1,1,0) state5= State( state.health , min(3,state.arrows+2), state.materials-1,1,0) state6= State( state.health , 
min(3,state.arrows+3), state.materials-1,1,0) CraftCost= (0.5*0.8*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.35 *0.8 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.15 *0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.5 * 0.2 *(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()]) + 0.35 *0.2*(COST + REWARD[state5.show()] + GAMMA*utilities[state5.show()]) + 0.15 *0.2*(COST + REWARD[state6.show()] + GAMMA*utilities[state6.show()])) elif(state.monsterState==0): #Ready state1= State( state.health , min(3,state.arrows+1), state.materials-1,1,0) state2= State( state.health , min(3,state.arrows+2), state.materials-1,1,0) state3= State( state.health , min(3,state.arrows+3), state.materials-1,1,0) state4= State( state.health , min(3,state.arrows+1), state.materials-1,1,1) state5= State( state.health , min(3,state.arrows+2), state.materials-1,1,1) state6= State( state.health , min(3,state.arrows+3), state.materials-1,1,1) CraftCost= (0.5*0.5*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.35 *0.5 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.15 *0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.5 * 0.5 *(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()]) + 0.35 *0.5*(COST + REWARD[state5.show()] + GAMMA*utilities[state5.show()]) + 0.15 *0.5*(COST + REWARD[state6.show()] + GAMMA*utilities[state6.show()])) elif(state.position==2): #Pos::East #Availaible actions : Left, None, shoot, stay, hit #Move Left if(state.monsterState==1): #Dormant state state1= State( state.health , state.arrows, state.materials,0,1) #MM stays dormant : Success of Action state2= State( state.health , state.arrows, state.materials,0,0) #MM becomes ready : Success of Action LeftCost= (0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()])) 
elif(state.monsterState==0): #Ready state state1= State( state.health , state.arrows, state.materials,0,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,2,1) #MM attacks and become dormant : UNSUCCESSFUL LeftCost= (0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()])) #STAY if(state.monsterState==1): #Dormant state state1= State( state.health , state.arrows, state.materials,2,1) #MM stays dormant : Success of Action state2= State( state.health , state.arrows, state.materials,2,0) #MM becomes ready : Success of Action StayCost= (0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) +0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()])) elif(state.monsterState==0): #Ready state state1= State( state.health , state.arrows, state.materials,2,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,2,1) #MM attacks and become dormant : UNSUCCESSFUL StayCost= (0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()])) #SHOOT if(state.arrows>=1): if(state.monsterState==1): #Dormant state state1= State( max(state.health-1,0) , state.arrows-1, state.materials,2,1) #MM stays dormant : Success of Action state2= State( max(state.health-1,0) , state.arrows-1, state.materials,2,0) #MM becomes ready : Success of Action state3= State( state.health, state.arrows-1, state.materials,2,1) #MM stays dormant : Failure of Action state4= State( state.health , state.arrows-1, state.materials,2,0) #MM becomes ready : Failure of Action ShootCost= (0.9 * 0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.9 * 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.1 * 0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.1 * 0.2 *(COST + 
REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state state1= State( max(state.health-1,0) , state.arrows-1, state.materials,2,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,2,1) #MM attacks and become dormant : UNSUCCESSFUL state3= State( state.health , state.arrows-1, state.materials,2,0) #MM stays ready : Failure of Action state4= State( min(state.health+1,4) , 0, state.materials,2,1) #MM attacks and become dormant : UNSUCCESSFUL ShootCost= (0.9 * 0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.9* 0.5 *(COST + REWARD[state2.show()] - 40 + GAMMA*utilities[state2.show()]) + 0.1 * 0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.1 * 0.5 *(COST + REWARD[state4.show()] -40 + GAMMA*utilities[state4.show()])) #HIT if(state.monsterState==1): #Dormant state state1= State( max(state.health-2,0) , state.arrows, state.materials,2,1) #MM stays dormant : Success of Action state2= State( max(state.health-2,0) , state.arrows, state.materials,2,0) #MM becomes ready : Success of Action state3= State( state.health, state.arrows, state.materials,2,1) #MM stays dormant : Failure of Action state4= State( state.health , state.arrows, state.materials,2,0) #MM becomes ready : Failure of Action HitCost= (0.2 * 0.8 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.2 * 0.2 *(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.8 * 0.8 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.8 * 0.2 *(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state state1= State( max(state.health-2,0) , state.arrows, state.materials,2,0) #MM stays ready : Success of Action state2= State( min(state.health+1,4) , 0, state.materials,2,1) #MM attacks and become dormant : UNSUCCESSFUL state3= State( state.health , state.arrows, state.materials,2,0) #MM stays ready : 
Failure of Action state4= State( min(state.health+1,4) , 0, state.materials,2,1) #MM attacks and become dormant : UNSUCCESSFUL HitCost= (0.2 * 0.5 *(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + (0.2) * 0.5 *(COST + REWARD[state2.show()] - 40+ GAMMA*utilities[state2.show()]) + 0.8 * 0.5 *(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + (0.8) * 0.5 *(COST + REWARD[state4.show()] -40 + GAMMA*utilities[state4.show()])) elif(state.position==3): #Pos::South #Availaible actions : Up, None, Gather, stay #Move Up if(state.monsterState==1): #Dormant state1= State( state.health , state.arrows, state.materials,0,1) #Success of Action : Stays in Dormant state2= State( state.health , state.arrows, state.materials,2,1) #Failure of Action : Stays in Dormant state3= State( state.health , state.arrows, state.materials,0,0) #Success of Action : Becomes ready state4= State( state.health , state.arrows, state.materials,2,0) #Failure of Action : Becomes ready UpCost=(0.85*0.8*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.15*0.8*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.85*0.2*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15*0.2*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state1= State( state.health , state.arrows, state.materials,0,0) #Success of Action : Stays Ready state2= State( state.health , state.arrows, state.materials,2,0) #Failure of Action : Stays Ready state3= State( state.health , state.arrows, state.materials,0,1) #Success of Action : Attack state4= State( state.health , state.arrows, state.materials,2,1) #Failure of Action : Attack UpCost=(0.85*0.5*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.15*0.5*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.85*0.5*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15*0.5*(COST + REWARD[state4.show()] + 
GAMMA*utilities[state4.show()])) #STAY if(state.monsterState==1): #Dormant state1= State( state.health , state.arrows, state.materials,3,1) #Success of Action : Stays in Dormant state2= State( state.health , state.arrows, state.materials,2,1) #Failure of Action : Stays in Dormant state3= State( state.health , state.arrows, state.materials,3,0) #Success of Action : Becomes ready state4= State( state.health , state.arrows, state.materials,2,0) #Failure of Action : Becomes ready StayCost= (0.85*0.8*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.15*0.8*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.85*0.2*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15*0.2*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state1= State( state.health , state.arrows, state.materials,3,0) #Success of Action : Stays Ready state2= State( state.health , state.arrows, state.materials,2,0) #Failure of Action : Stays Ready state3= State( state.health , state.arrows, state.materials,3,1) #Success of Action : Attack state4= State( state.health , state.arrows, state.materials,2,1) #Failure of Action : Attack StayCost= (0.85*0.5*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.15*0.5*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.85*0.5*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.15*0.5*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) #Gather if(state.monsterState==1): #Dormant state1= State( state.health , state.arrows, min(state.materials+1,2),3,1) state2= State( state.health , state.arrows, state.materials,3,1) state3= State( state.health , state.arrows, min(state.materials+1,2),3,0) state4= State( state.health , state.arrows, state.materials,3,0) GatherCost = (0.75*0.8*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.25 *0.8*(COST + REWARD[state2.show()] + 
GAMMA*utilities[state2.show()]) + 0.75*0.2*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.25 *0.2*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0):# Ready state1= State( state.health , state.arrows, min(state.materials+1,2),3,0) state2= State( state.health , state.arrows, state.materials,3,0) state3= State( state.health , state.arrows, min(state.materials+1,2),3,1) state4= State( state.health , state.arrows, state.materials,3,1) GatherCost = (0.75*0.5*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.25 *0.5*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.75*0.5*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.25 *0.5*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.position==4): #Pos::West #Availaible actions : Right, None, shoot, stay #Move Right if(state.monsterState==1): #Dormant state1= State( state.health , state.arrows, state.materials,0,1) # Success of Action state2= State( state.health , state.arrows, state.materials,0,0) # Success of Action RightCost= (0.8*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.2*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()])) elif(state.monsterState==0): #Ready state1= State( state.health , state.arrows, state.materials,0,0) # Success of Action state2= State( state.health , state.arrows, state.materials,0,1) # Success of Action RightCost= (0.5*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.5*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()])) # #STAY # state1= State( state.health , state.arrows, state.materials,4,state.monsterState) # Success of Action # StayCost= (COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) #STAY if(state.monsterState==1): #Dormant state1= State( state.health , state.arrows, state.materials,4,1) # Success of Action state2= State( state.health , state.arrows, 
state.materials,4,0) # Success of Action StayCost= (0.8*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.2*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()])) elif(state.monsterState==0): #Ready state1= State( state.health , state.arrows, state.materials,4,0) # Success of Action state2= State( state.health , state.arrows, state.materials,4,1) # Success of Action StayCost= (0.5*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.5*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()])) #SHOOT if(state.arrows>=1): if(state.monsterState==1): #Dormant state1= State( max(state.health-1,0) , state.arrows-1, state.materials,4,1)# Success of Action state2= State( state.health, state.arrows-1, state.materials,4,1) # Failure of Action state3= State( max(state.health-1,0) , state.arrows-1, state.materials,4,0)# Success of Action state4= State( state.health, state.arrows-1, state.materials,4,0) # Failure of Action ShootCost= (0.25*0.8*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.75*0.8*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.25*0.2*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.75*0.2*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) elif(state.monsterState==0): #Ready state1= State( max(state.health-1,0) , state.arrows-1, state.materials,4,0)# Success of Action state2= State( state.health, state.arrows-1, state.materials,4,0) # Failure of Action state3= State( max(state.health-1,0) , state.arrows-1, state.materials,4,1)# Success of Action state4= State( state.health, state.arrows-1, state.materials,4,1) # Failure of Action ShootCost= (0.25*0.5*(COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) + 0.75*0.5*(COST + REWARD[state2.show()] + GAMMA*utilities[state2.show()]) + 0.25*0.5*(COST + REWARD[state3.show()] + GAMMA*utilities[state3.show()]) + 0.75*0.5*(COST + REWARD[state4.show()] + GAMMA*utilities[state4.show()])) 
if(NewUtil<=DownCost): NewUtil=DownCost BestAction=0 #DOWN if(NewUtil<=UpCost): NewUtil=UpCost BestAction=1#"Up" if(NewUtil<=RightCost): NewUtil=RightCost BestAction=2#"Right" if(NewUtil<=LeftCost): NewUtil=LeftCost BestAction=3#"Left" if(NewUtil<=GatherCost): NewUtil=GatherCost BestAction=4#"Gather" if(NewUtil<=StayCost): NewUtil=StayCost BestAction=5#"Stay" if(NewUtil<=HitCost): NewUtil=HitCost BestAction=6#"Hit" if(NewUtil<=CraftCost): NewUtil=CraftCost BestAction=7#"Craft" if(NewUtil<=ShootCost): NewUtil=ShootCost BestAction=8 #"Shoot" # if(state.health==1 and state.arrows==0 and state.materials==0 and state.position==1 and state.monsterState==1): # print(DownCost) # print(UpCost) # print(LeftCost) # print(RightCost) # print(ShootCost) # print(StayCost) # print(HitCost) return NewUtil,BestAction def trace(iteration, utilities, policies): print(f'iteration={iteration}') # utilities=np.around(utilities,4) for state, util in np.ndenumerate(utilities): # util=np.around(util) util_str = '{:.3f}'.format(util) # if state[0] == 0: # print(f'{state}:{-1}=[{util_str}]') # continue if policies[state] == -1: act_str = 'NONE' elif policies[state] == 0: act_str = 'DOWN' elif policies[state] == 1: act_str = 'UP' elif policies[state] == 2: act_str = 'RIGHT' elif policies[state] == 3: act_str = 'LEFT' elif policies[state] == 4: act_str = 'GATHER' elif policies[state] == 5: act_str = 'STAY' elif policies[state] == 6: act_str = 'HIT' elif policies[state] == 7: act_str = 'CRAFT' elif policies[state] == 8: act_str = 'SHOOT' print(f'({HashArr[state[3]]},{state[2]},{state[1]},{HashArr1[state[4]]},{state[0] * 25}):{act_str}=[{util_str}]') print("\n\n") # SSS=State(1,0,0,0,1) # print(action(SSS)) value_iteration() sys.stdout = orig_stdout f.close()
"""Value iteration for a 5-square grid MDP (hero vs. the Mighty Monster).

A state is (monster_health/25, arrows, materials, position, monster_state)
with positions Center=0, North=1, East=2, South=3, West=4 and monster
states 0=Ready, 1=Dormant.  The script runs value iteration until the
largest utility change is <= DELTA and writes a per-iteration trace of
utilities and the greedy policy to ./outputs/part_2_trace.txt.
"""

import os
import sys
from copy import deepcopy
from functools import reduce  # noqa: F401  kept: imported in the original file
from operator import add      # noqa: F401  kept: imported in the original file

import numpy as np

# Pretty-print lookup tables for the trace output.
HashArr = ["C", "N", "E", "S", "W"]   # position index -> square initial
HashArr1 = ["R", "D"]                 # monster-state index -> Ready / Dormant

# State-space dimensions.
HEALTH_RANGE = 5          # monster health 0,25,50,75,100 (stored divided by 25)
ARROWS_RANGE = 4          # 0..3 arrows
MATERIALS_RANGE = 3       # 0..2 materials
POSITION_RANGE = 5        # Center=0, North=1, East=2, South=3, West=4
MONSTER_STATES_RANGE = 2  # 0: Ready, 1: Dormant
ACTION_RANGE = 10

HEALTH_VALUES = tuple(range(HEALTH_RANGE))
ARROWS_VALUES = tuple(range(ARROWS_RANGE))
MATERIALS_VALUES = tuple(range(MATERIALS_RANGE))
POSITION_VALUES = tuple(range(POSITION_RANGE))
MONSTER_STATE_VALUES = tuple(range(MONSTER_STATES_RANGE))
ACTION_VALUES = tuple(range(ACTION_RANGE))

HEALTH_FACTOR = 25        # 0, 25, 50, 75, 100
ARROWS_FACTOR = 1         # 0, 1, 2, 3
MATERIALS_FACTOR = 1      # 0, 1, 2
POSITION_FACTOR = 1       # 0, 1, 2, 3, 4
MONSTER_STATES_FACTOR = 1  # 0: Ready, 1: Dormant
ACTION_FACTOR = 1

# Named action codes (kept for reference; the solver uses the 0..8 codes
# documented in action()).  The original file assigned ACTION_STAY twice
# (6, then 9); the duplicate assignment has been removed.
ACTION_SHOOT = 0
ACTION_HIT = 1
ACTION_UP = 2
ACTION_DOWN = 3
ACTION_RIGHT = 4
ACTION_LEFT = 5
ACTION_STAY = 6
ACTION_GATHER = 7
ACTION_CRAFT = 8

TEAM = 34
Y = [1 / 2, 1, 2]
PRIZE = 50                        # terminal reward once monster health is 0
COST = -10 / Y[TEAM % 3]          # per-step cost (= -10 for TEAM 34)
GAMMA = 0.999
DELTA = 0.001
MONSTER_ATTACK_PENALTY = -40      # extra reward term when the monster's attack lands

utilities = np.zeros((HEALTH_RANGE, ARROWS_RANGE, MATERIALS_RANGE,
                      POSITION_RANGE, MONSTER_STATES_RANGE))
policies = np.full((HEALTH_RANGE, ARROWS_RANGE, MATERIALS_RANGE,
                    POSITION_RANGE, MONSTER_STATES_RANGE), -1, dtype='int')
temp = np.zeros(utilities.shape)

# All trace output goes to a file; stdout is restored at the end of the run.
orig_stdout = sys.stdout
os.makedirs("./outputs", exist_ok=True)  # robustness: don't crash if the dir is missing
f = open("./outputs/part_2_trace.txt", "w")
sys.stdout = f


class State:
    """Thin wrapper over (health, arrows, materials, position, monster_state)."""

    def __init__(self, enemy_health, num_arrows, num_materials,
                 num_position, monster_state):
        if (enemy_health not in HEALTH_VALUES) or (num_arrows not in ARROWS_VALUES) \
                or (num_materials not in MATERIALS_VALUES) \
                or (num_position not in POSITION_VALUES) \
                or (monster_state not in MONSTER_STATE_VALUES):
            print(enemy_health, num_arrows, num_materials, num_position, monster_state)
            raise ValueError
        self.health = enemy_health
        self.arrows = num_arrows
        self.materials = num_materials
        self.position = num_position
        self.monsterState = monster_state

    def show(self):
        """Index tuple usable with the utilities/policies/REWARD arrays."""
        return (self.health, self.arrows, self.materials,
                self.position, self.monsterState)

    def __str__(self):
        return f'({self.health},{self.arrows},{self.materials},{self.position},{self.monsterState})'


REWARD = np.zeros((HEALTH_RANGE, ARROWS_RANGE, MATERIALS_RANGE,
                   POSITION_RANGE, MONSTER_STATES_RANGE))
REWARD[0, :, :, :, :] = PRIZE   # reaching health 0 pays out the prize


def _q(s, bonus=0.0):
    """One-step Bellman value of landing in state `s` (plus any extra reward)."""
    return COST + REWARD[s.show()] + bonus + GAMMA * utilities[s.show()]


def _expected(outcomes):
    """Probability-weighted sum of _q over (prob, State, bonus) outcomes.

    Outcomes are listed in the same order as the original hand-unrolled
    sums so the floating-point result is bit-identical.
    """
    return sum(p * _q(s, b) for p, s, b in outcomes)


def action(state):
    """Greedy Bellman backup: return (best expected utility, best action code).

    Action codes: 0 Down, 1 Up, 2 Right, 3 Left, 4 Gather, 5 Stay, 6 Hit,
    7 Craft, 8 Shoot; -1 means terminal (monster already dead).  Ties go to
    the highest-numbered action, matching the original `<=` comparison chain.
    """
    if state.health == 0:
        return 0, -1  # terminal state: nothing left to do

    h, a, m = state.health, state.arrows, state.materials
    dormant = state.monsterState == 1
    hp_up = min(h + 1, 4)   # monster heals one step when its attack lands
    hp_m1 = max(h - 1, 0)   # arrow damage
    hp_m2 = max(h - 2, 0)   # blade damage

    # Unavailable actions keep -inf so they can never win the argmax.
    costs = {code: -np.inf for code in range(9)}

    if state.position == 0:
        # Center: Up/Down/Left/Right/Stay plus Shoot and Hit.
        # A Ready monster attacks w.p. 0.5: hero loses all arrows, takes the
        # -40 penalty, and the monster heals one step and goes Dormant.
        attacked = State(hp_up, 0, m, 0, 1)

        def move(dest):
            # Movement succeeds w.p. 0.85, else the hero lands East (2).
            # A Dormant monster wakes w.p. 0.2.
            if dormant:
                return _expected([
                    (0.85 * 0.8, State(h, a, m, dest, 1), 0),
                    (0.85 * 0.2, State(h, a, m, dest, 0), 0),
                    (0.15 * 0.8, State(h, a, m, 2, 1), 0),
                    (0.15 * 0.2, State(h, a, m, 2, 0), 0),
                ])
            return _expected([
                (0.85 * 0.5, State(h, a, m, dest, 0), 0),
                (0.85 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
                (0.15 * 0.5, State(h, a, m, 2, 0), 0),
                (0.15 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
            ])

        costs[0] = move(3)   # Down  -> South
        costs[1] = move(1)   # Up    -> North
        costs[2] = move(2)   # Right -> East
        costs[3] = move(4)   # Left  -> West
        costs[5] = move(0)   # Stay  -> Center

        if a >= 1:  # Shoot: arrow hits w.p. 0.5 from Center
            if dormant:
                costs[8] = _expected([
                    (0.5 * 0.8, State(hp_m1, a - 1, m, 0, 1), 0),
                    (0.5 * 0.2, State(hp_m1, a - 1, m, 0, 0), 0),
                    (0.5 * 0.8, State(h, a - 1, m, 0, 1), 0),
                    (0.5 * 0.2, State(h, a - 1, m, 0, 0), 0),
                ])
            else:
                costs[8] = _expected([
                    (0.5 * 0.5, State(hp_m1, a - 1, m, 0, 0), 0),
                    (0.5 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
                    (0.5 * 0.5, State(h, a - 1, m, 0, 0), 0),
                    (0.5 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
                ])

        # Hit: blade does 2 damage, succeeds w.p. 0.1 from Center.
        if dormant:
            costs[6] = _expected([
                (0.1 * 0.8, State(hp_m2, a, m, 0, 1), 0),
                (0.1 * 0.2, State(hp_m2, a, m, 0, 0), 0),
                (0.9 * 0.8, State(h, a, m, 0, 1), 0),
                (0.9 * 0.2, State(h, a, m, 0, 0), 0),
            ])
        else:
            costs[6] = _expected([
                (0.1 * 0.5, State(hp_m2, a, m, 0, 0), 0),
                (0.1 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
                # NOTE(review): the original drops one arrow on a *failed*
                # Hit here (max(arrows-1,0)) -- looks unintentional, but it
                # is preserved to keep behaviour identical; confirm intent.
                (0.9 * 0.5, State(h, max(a - 1, 0), m, 0, 0), 0),
                (0.9 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
            ])

    elif state.position == 1:
        # North: Down/Stay plus Craft.  The monster cannot reach the hero
        # here, so in the Ready branch its "attack" only toggles its state
        # (no -40 penalty, no arrow loss) -- as in the original.
        def move(dest):
            if dormant:
                return _expected([
                    (0.85 * 0.8, State(h, a, m, dest, 1), 0),
                    (0.15 * 0.8, State(h, a, m, 2, 1), 0),
                    (0.85 * 0.2, State(h, a, m, dest, 0), 0),
                    (0.15 * 0.2, State(h, a, m, 2, 0), 0),
                ])
            return _expected([
                (0.85 * 0.5, State(h, a, m, dest, 0), 0),
                (0.15 * 0.5, State(h, a, m, 2, 0), 0),
                (0.85 * 0.5, State(h, a, m, dest, 1), 0),
                (0.15 * 0.5, State(h, a, m, 2, 1), 0),
            ])

        costs[0] = move(0)   # Down -> Center
        costs[5] = move(1)   # Stay

        if m >= 1:  # Craft: +1/+2/+3 arrows w.p. 0.5/0.35/0.15, -1 material
            if dormant:
                costs[7] = _expected([
                    (0.5 * 0.8, State(h, min(3, a + 1), m - 1, 1, 1), 0),
                    (0.35 * 0.8, State(h, min(3, a + 2), m - 1, 1, 1), 0),
                    (0.15 * 0.8, State(h, min(3, a + 3), m - 1, 1, 1), 0),
                    (0.5 * 0.2, State(h, min(3, a + 1), m - 1, 1, 0), 0),
                    (0.35 * 0.2, State(h, min(3, a + 2), m - 1, 1, 0), 0),
                    (0.15 * 0.2, State(h, min(3, a + 3), m - 1, 1, 0), 0),
                ])
            else:
                costs[7] = _expected([
                    (0.5 * 0.5, State(h, min(3, a + 1), m - 1, 1, 0), 0),
                    (0.35 * 0.5, State(h, min(3, a + 2), m - 1, 1, 0), 0),
                    (0.15 * 0.5, State(h, min(3, a + 3), m - 1, 1, 0), 0),
                    (0.5 * 0.5, State(h, min(3, a + 1), m - 1, 1, 1), 0),
                    (0.35 * 0.5, State(h, min(3, a + 2), m - 1, 1, 1), 0),
                    (0.15 * 0.5, State(h, min(3, a + 3), m - 1, 1, 1), 0),
                ])

    elif state.position == 2:
        # East: Left/Stay plus Shoot/Hit.  Moves from East never fail.
        attacked = State(hp_up, 0, m, 2, 1)
        if dormant:
            costs[3] = _expected([(0.8, State(h, a, m, 0, 1), 0),
                                  (0.2, State(h, a, m, 0, 0), 0)])
            costs[5] = _expected([(0.8, State(h, a, m, 2, 1), 0),
                                  (0.2, State(h, a, m, 2, 0), 0)])
        else:
            costs[3] = _expected([(0.5, State(h, a, m, 0, 0), 0),
                                  (0.5, attacked, MONSTER_ATTACK_PENALTY)])
            costs[5] = _expected([(0.5, State(h, a, m, 2, 0), 0),
                                  (0.5, attacked, MONSTER_ATTACK_PENALTY)])

        if a >= 1:  # Shoot: arrow hits w.p. 0.9 from East
            if dormant:
                costs[8] = _expected([
                    (0.9 * 0.8, State(hp_m1, a - 1, m, 2, 1), 0),
                    (0.9 * 0.2, State(hp_m1, a - 1, m, 2, 0), 0),
                    (0.1 * 0.8, State(h, a - 1, m, 2, 1), 0),
                    (0.1 * 0.2, State(h, a - 1, m, 2, 0), 0),
                ])
            else:
                costs[8] = _expected([
                    (0.9 * 0.5, State(hp_m1, a - 1, m, 2, 0), 0),
                    (0.9 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
                    (0.1 * 0.5, State(h, a - 1, m, 2, 0), 0),
                    (0.1 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
                ])

        # Hit: succeeds w.p. 0.2 from East.
        if dormant:
            costs[6] = _expected([
                (0.2 * 0.8, State(hp_m2, a, m, 2, 1), 0),
                (0.2 * 0.2, State(hp_m2, a, m, 2, 0), 0),
                (0.8 * 0.8, State(h, a, m, 2, 1), 0),
                (0.8 * 0.2, State(h, a, m, 2, 0), 0),
            ])
        else:
            costs[6] = _expected([
                (0.2 * 0.5, State(hp_m2, a, m, 2, 0), 0),
                (0.2 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
                (0.8 * 0.5, State(h, a, m, 2, 0), 0),
                (0.8 * 0.5, attacked, MONSTER_ATTACK_PENALTY),
            ])

    elif state.position == 3:
        # South: Up/Stay plus Gather.  Like North, the monster cannot reach
        # the hero, so Ready-state transitions carry no attack penalty.
        def move(dest):
            if dormant:
                return _expected([
                    (0.85 * 0.8, State(h, a, m, dest, 1), 0),
                    (0.15 * 0.8, State(h, a, m, 2, 1), 0),
                    (0.85 * 0.2, State(h, a, m, dest, 0), 0),
                    (0.15 * 0.2, State(h, a, m, 2, 0), 0),
                ])
            return _expected([
                (0.85 * 0.5, State(h, a, m, dest, 0), 0),
                (0.15 * 0.5, State(h, a, m, 2, 0), 0),
                (0.85 * 0.5, State(h, a, m, dest, 1), 0),
                (0.15 * 0.5, State(h, a, m, 2, 1), 0),
            ])

        costs[1] = move(0)   # Up -> Center
        costs[5] = move(3)   # Stay

        mat_up = min(m + 1, 2)
        if dormant:  # Gather: succeeds w.p. 0.75
            costs[4] = _expected([
                (0.75 * 0.8, State(h, a, mat_up, 3, 1), 0),
                (0.25 * 0.8, State(h, a, m, 3, 1), 0),
                (0.75 * 0.2, State(h, a, mat_up, 3, 0), 0),
                (0.25 * 0.2, State(h, a, m, 3, 0), 0),
            ])
        else:
            costs[4] = _expected([
                (0.75 * 0.5, State(h, a, mat_up, 3, 0), 0),
                (0.25 * 0.5, State(h, a, m, 3, 0), 0),
                (0.75 * 0.5, State(h, a, mat_up, 3, 1), 0),
                (0.25 * 0.5, State(h, a, m, 3, 1), 0),
            ])

    elif state.position == 4:
        # West: Right/Stay plus Shoot.  The monster's attack never lands
        # here, so Ready-state transitions only toggle the monster state.
        if dormant:
            costs[2] = _expected([(0.8, State(h, a, m, 0, 1), 0),
                                  (0.2, State(h, a, m, 0, 0), 0)])
            costs[5] = _expected([(0.8, State(h, a, m, 4, 1), 0),
                                  (0.2, State(h, a, m, 4, 0), 0)])
        else:
            costs[2] = _expected([(0.5, State(h, a, m, 0, 0), 0),
                                  (0.5, State(h, a, m, 0, 1), 0)])
            costs[5] = _expected([(0.5, State(h, a, m, 4, 0), 0),
                                  (0.5, State(h, a, m, 4, 1), 0)])

        if a >= 1:  # Shoot: arrow hits w.p. 0.25 from West
            if dormant:
                costs[8] = _expected([
                    (0.25 * 0.8, State(hp_m1, a - 1, m, 4, 1), 0),
                    (0.75 * 0.8, State(h, a - 1, m, 4, 1), 0),
                    (0.25 * 0.2, State(hp_m1, a - 1, m, 4, 0), 0),
                    (0.75 * 0.2, State(h, a - 1, m, 4, 0), 0),
                ])
            else:
                costs[8] = _expected([
                    (0.25 * 0.5, State(hp_m1, a - 1, m, 4, 0), 0),
                    (0.75 * 0.5, State(h, a - 1, m, 4, 0), 0),
                    (0.25 * 0.5, State(hp_m1, a - 1, m, 4, 1), 0),
                    (0.75 * 0.5, State(h, a - 1, m, 4, 1), 0),
                ])

    # Argmax over action codes 0..8 with `<=`, so ties favour the
    # higher-numbered action exactly as the original comparison chain did.
    best_util = -np.inf
    best_action = -2
    for code in range(9):
        if best_util <= costs[code]:
            best_util = costs[code]
            best_action = code
    return best_util, best_action


def value_iteration():
    """Sweep all states with Bellman backups until max change <= DELTA."""
    global utilities
    index = 0
    while True:
        delta = 0
        for l in range(POSITION_RANGE):
            for k in range(MATERIALS_RANGE):
                for j in range(ARROWS_RANGE):
                    for m in range(MONSTER_STATES_RANGE):
                        for i in range(HEALTH_RANGE):
                            stt = State(i, j, k, l, m)
                            # Fix: the original called action(stt) twice and
                            # discarded the first result, doubling runtime.
                            best_util, best_act = action(stt)
                            temp[stt.show()] = best_util
                            policies[stt.show()] = best_act
                            delta = max(delta,
                                        abs(temp[stt.show()] - utilities[stt.show()]))
        utilities = deepcopy(temp)
        trace(index, utilities, policies)
        index += 1
        if delta <= DELTA:
            break


# Policy-code -> trace label.  A dict lookup replaces the original if/elif
# ladder, which raised UnboundLocalError for any unexpected code.
ACTION_NAMES = {-1: 'NONE', 0: 'DOWN', 1: 'UP', 2: 'RIGHT', 3: 'LEFT',
                4: 'GATHER', 5: 'STAY', 6: 'HIT', 7: 'CRAFT', 8: 'SHOOT'}


def trace(iteration, utilities, policies):
    """Print one iteration's utility and greedy action for every state."""
    print(f'iteration={iteration}')
    for state, util in np.ndenumerate(utilities):
        util_str = '{:.3f}'.format(util)
        act_str = ACTION_NAMES.get(int(policies[state]), 'NONE')
        print(f'({HashArr[state[3]]},{state[2]},{state[1]},{HashArr1[state[4]]},{state[0] * 25}):{act_str}=[{util_str}]')
    print("\n\n")


value_iteration()
sys.stdout = orig_stdout
f.close()
en
0.794079
#print(ACTION_VALUES) # 0, 25, 50, 75, 100 # 0, 1, 2, 3 # 0, 1, 2 # 0, 1, 2, 3, 4 #0: Ready, 1: Dormant #COST=-10 # Center=0, North=1, East=2, South=3, West=4 # one iteration of value iteration # temp=np.zeros(utilities.shape) # returns cost, array of tuple of (probability, state) # state = State(*state) #"NONE" #Pos::center #Availaible actions : Up, Down, Right, Left, None, shoot, stay, hit #Move Down #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #MM stays dormant : Failure of Action #MM becomes ready : Failure of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #MM stays ready : Failure of Action #MM attacks and become dormant : UNSUCCESSFUL #Move Up #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #MM stays dormant : Failure of Action #MM becomes ready : Failure of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #MM stays ready : Failure of Action #MM attacks and become dormant : UNSUCCESSFUL #Move Right #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #MM stays dormant : Failure of Action #MM becomes ready : Failure of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #MM stays ready : Failure of Action #MM attacks and become dormant : UNSUCCESSFUL #Move Left #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #MM stays dormant : Failure of Action #MM becomes ready : Failure of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #MM stays ready : Failure of Action #MM attacks and become dormant : UNSUCCESSFUL #STAY #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #MM stays dormant : Failure of Action #MM becomes ready : Failure of Action #Ready state 
#MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #MM stays ready : Failure of Action #MM attacks and become dormant : UNSUCCESSFUL #SHOOT #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #MM stays dormant : Failure of Action #MM becomes ready : Failure of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #MM stays ready : Failure of Action #MM attacks and become dormant : UNSUCCESSFUL #HIT #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #MM stays dormant : Failure of Action #MM becomes ready : Failure of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #MM stays ready : Failure of Action #MM attacks and become dormant : UNSUCCESSFUL #Pos::North #Availaible actions : Down, None, Craft, stay #Move Down #Dormant #Success of Action : Stays in Dormant #Failure of Action : Stays in Dormant #Success of Action : Becomes ready #Failure of Action : Becomes ready #Ready #Success of Action : Stays Ready #Failure of Action : Stays Ready #Success of Action : Attack #Failure of Action : Attack #STAY #Dormant #Success of Action : Stays in Dormant #Failure of Action : Stays in Dormant #Success of Action : Becomes ready #Failure of Action : Becomes ready #Ready #Success of Action : Stays Ready #Failure of Action : Stays Ready #Success of Action : Attack #Failure of Action : Attack #Craft #Dormant #Ready #Pos::East #Availaible actions : Left, None, shoot, stay, hit #Move Left #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #STAY #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #SHOOT #Dormant 
state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #MM stays dormant : Failure of Action #MM becomes ready : Failure of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #MM stays ready : Failure of Action #MM attacks and become dormant : UNSUCCESSFUL #HIT #Dormant state #MM stays dormant : Success of Action #MM becomes ready : Success of Action #MM stays dormant : Failure of Action #MM becomes ready : Failure of Action #Ready state #MM stays ready : Success of Action #MM attacks and become dormant : UNSUCCESSFUL #MM stays ready : Failure of Action #MM attacks and become dormant : UNSUCCESSFUL #Pos::South #Availaible actions : Up, None, Gather, stay #Move Up #Dormant #Success of Action : Stays in Dormant #Failure of Action : Stays in Dormant #Success of Action : Becomes ready #Failure of Action : Becomes ready #Ready #Success of Action : Stays Ready #Failure of Action : Stays Ready #Success of Action : Attack #Failure of Action : Attack #STAY #Dormant #Success of Action : Stays in Dormant #Failure of Action : Stays in Dormant #Success of Action : Becomes ready #Failure of Action : Becomes ready #Ready #Success of Action : Stays Ready #Failure of Action : Stays Ready #Success of Action : Attack #Failure of Action : Attack #Gather #Dormant # Ready #Pos::West #Availaible actions : Right, None, shoot, stay #Move Right #Dormant # Success of Action # Success of Action #Ready # Success of Action # Success of Action # #STAY # state1= State( state.health , state.arrows, state.materials,4,state.monsterState) # Success of Action # StayCost= (COST + REWARD[state1.show()] + GAMMA*utilities[state1.show()]) #STAY #Dormant # Success of Action # Success of Action #Ready # Success of Action # Success of Action #SHOOT #Dormant # Success of Action # Failure of Action # Success of Action # Failure of Action #Ready # Success of Action # Failure of Action # Success of Action # Failure of Action #DOWN #"Up" 
#"Right" #"Left" #"Gather" #"Stay" #"Hit" #"Craft" #"Shoot" # if(state.health==1 and state.arrows==0 and state.materials==0 and state.position==1 and state.monsterState==1): # print(DownCost) # print(UpCost) # print(LeftCost) # print(RightCost) # print(ShootCost) # print(StayCost) # print(HitCost) # utilities=np.around(utilities,4) # util=np.around(util) # if state[0] == 0: # print(f'{state}:{-1}=[{util_str}]') # continue # SSS=State(1,0,0,0,1) # print(action(SSS))
2.410497
2
uniter_model/eval/nlvr2.py
intersun/LightningDOT
64
6629762
""" copied from official NLVR2 github python eval/nlvr2.py <output.csv> <annotation.json> """ import json import sys # Load the predictions file. Assume it is a CSV. predictions = { } for line in open(sys.argv[1]).readlines(): if line: splits = line.strip().split(",") # We assume identifiers are in the format "split-####-#-#.png". identifier = splits[0] prediction = splits[1] predictions[identifier] = prediction # Load the labeled examples. labeled_examples = [json.loads(line) for line in open(sys.argv[2]).readlines() if line] # If not, identify the ones that are missing, and exit. total_num = len(labeled_examples) if len(predictions) < total_num: print("Some predictions are missing!") print("Got " + str(len(predictions)) + " predictions but expected " + str(total_num)) for example in labeled_examples: lookup = example["identifier"] if not lookup in predictions: print("Missing prediction for item " + str(lookup)) exit() # Get the precision by iterating through the examples and checking the value # that was predicted. # Also update the "consistency" dictionary that keeps track of whether all # predictions for a given sentence were correct. num_correct = 0. consistency_dict = { } for example in labeled_examples: anon_label = example["identifier"].split("-") anon_label[2] = '' anon_label = '-'.join(anon_label) if not anon_label in consistency_dict: consistency_dict[anon_label] = True lookup = example["identifier"] prediction = predictions[lookup] if prediction.lower() == example["label"].lower(): num_correct += 1. else: consistency_dict[anon_label] = False # Calculate consistency. num_consistent = 0. unique_sentence = len(consistency_dict) for identifier, consistent in consistency_dict.items(): if consistent: num_consistent += 1 # Report values. print("accuracy=" + str(num_correct / total_num)) print("consistency=" + str(num_consistent / unique_sentence))
""" copied from official NLVR2 github python eval/nlvr2.py <output.csv> <annotation.json> """ import json import sys # Load the predictions file. Assume it is a CSV. predictions = { } for line in open(sys.argv[1]).readlines(): if line: splits = line.strip().split(",") # We assume identifiers are in the format "split-####-#-#.png". identifier = splits[0] prediction = splits[1] predictions[identifier] = prediction # Load the labeled examples. labeled_examples = [json.loads(line) for line in open(sys.argv[2]).readlines() if line] # If not, identify the ones that are missing, and exit. total_num = len(labeled_examples) if len(predictions) < total_num: print("Some predictions are missing!") print("Got " + str(len(predictions)) + " predictions but expected " + str(total_num)) for example in labeled_examples: lookup = example["identifier"] if not lookup in predictions: print("Missing prediction for item " + str(lookup)) exit() # Get the precision by iterating through the examples and checking the value # that was predicted. # Also update the "consistency" dictionary that keeps track of whether all # predictions for a given sentence were correct. num_correct = 0. consistency_dict = { } for example in labeled_examples: anon_label = example["identifier"].split("-") anon_label[2] = '' anon_label = '-'.join(anon_label) if not anon_label in consistency_dict: consistency_dict[anon_label] = True lookup = example["identifier"] prediction = predictions[lookup] if prediction.lower() == example["label"].lower(): num_correct += 1. else: consistency_dict[anon_label] = False # Calculate consistency. num_consistent = 0. unique_sentence = len(consistency_dict) for identifier, consistent in consistency_dict.items(): if consistent: num_consistent += 1 # Report values. print("accuracy=" + str(num_correct / total_num)) print("consistency=" + str(num_consistent / unique_sentence))
en
0.828511
copied from official NLVR2 github python eval/nlvr2.py <output.csv> <annotation.json> # Load the predictions file. Assume it is a CSV. # We assume identifiers are in the format "split-####-#-#.png". # Load the labeled examples. # If not, identify the ones that are missing, and exit. # Get the precision by iterating through the examples and checking the value # that was predicted. # Also update the "consistency" dictionary that keeps track of whether all # predictions for a given sentence were correct. # Calculate consistency. # Report values.
3.239065
3
users/services.py
Mohamed-Kaizen/home_recruiters
1
6629763
from typing import Dict, List, Optional from fastapi import HTTPException, status from tortoise import exceptions as tortoise_exceptions from .models import User, User_Pydantic from .schema import CustomerCreate, WorkerCreate from .utils import create_password_hash, verify_password class UserServices: @staticmethod async def create_super_user(data: Dict) -> User: password = create_password_hash(password=data.get("password")) user = await User.create( username=data.get("username"), email=data.get("email"), full_name=data.get("full_name"), password=password, is_superuser=data.get("is_superuser"), is_staff=data.get("is_staff"), is_active=data.get("is_active"), phone_number=data.get("phone_number"), ) return user async def authenticate(self, *, username: str, password: str) -> Optional[User]: user = await self.get_user_by_username(username=username) if not user: return None if not verify_password(plain_password=password, hashed_password=<PASSWORD>): return None return user @staticmethod async def get_worker_by_username(*, username: str): return await User_Pydantic.from_queryset_single( User.get(username=username, is_worker=True) ) @staticmethod async def get_worker(*, username: str): return await User.get(username=username, is_worker=True) @staticmethod async def get_user_by_username(*, username: str) -> Optional[User]: try: user = await User.get(username=username) return user except tortoise_exceptions.DoesNotExist: return None @staticmethod async def get_all_users() -> List[User]: return await User.all() @staticmethod async def get_all_customers() -> List[User]: return await User.filter(is_customer=True) @staticmethod async def get_all_workers() -> List[User]: return await User.filter(is_worker=True) @staticmethod async def create_worker(*, user: WorkerCreate) -> User: password = create_password_<PASSWORD>(password=<PASSWORD>) try: user = await User.create( username=user.username, email=user.email, full_name=user.full_name, password=password, 
phone_number=user.phone_number, career=user.career, is_worker=True, ) return user except tortoise_exceptions.IntegrityError: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="username or email or phone number already exists.", ) @staticmethod async def create_customer(*, user: CustomerCreate) -> User: password = create_password_<PASSWORD>(password=<PASSWORD>) try: user = await User.create( username=user.username, email=user.email, full_name=user.full_name, password=password, phone_number=user.phone_number, is_customer=True, ) return user except tortoise_exceptions.IntegrityError: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="username or email or phone number already exists.", )
from typing import Dict, List, Optional from fastapi import HTTPException, status from tortoise import exceptions as tortoise_exceptions from .models import User, User_Pydantic from .schema import CustomerCreate, WorkerCreate from .utils import create_password_hash, verify_password class UserServices: @staticmethod async def create_super_user(data: Dict) -> User: password = create_password_hash(password=data.get("password")) user = await User.create( username=data.get("username"), email=data.get("email"), full_name=data.get("full_name"), password=password, is_superuser=data.get("is_superuser"), is_staff=data.get("is_staff"), is_active=data.get("is_active"), phone_number=data.get("phone_number"), ) return user async def authenticate(self, *, username: str, password: str) -> Optional[User]: user = await self.get_user_by_username(username=username) if not user: return None if not verify_password(plain_password=password, hashed_password=<PASSWORD>): return None return user @staticmethod async def get_worker_by_username(*, username: str): return await User_Pydantic.from_queryset_single( User.get(username=username, is_worker=True) ) @staticmethod async def get_worker(*, username: str): return await User.get(username=username, is_worker=True) @staticmethod async def get_user_by_username(*, username: str) -> Optional[User]: try: user = await User.get(username=username) return user except tortoise_exceptions.DoesNotExist: return None @staticmethod async def get_all_users() -> List[User]: return await User.all() @staticmethod async def get_all_customers() -> List[User]: return await User.filter(is_customer=True) @staticmethod async def get_all_workers() -> List[User]: return await User.filter(is_worker=True) @staticmethod async def create_worker(*, user: WorkerCreate) -> User: password = create_password_<PASSWORD>(password=<PASSWORD>) try: user = await User.create( username=user.username, email=user.email, full_name=user.full_name, password=password, 
phone_number=user.phone_number, career=user.career, is_worker=True, ) return user except tortoise_exceptions.IntegrityError: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="username or email or phone number already exists.", ) @staticmethod async def create_customer(*, user: CustomerCreate) -> User: password = create_password_<PASSWORD>(password=<PASSWORD>) try: user = await User.create( username=user.username, email=user.email, full_name=user.full_name, password=password, phone_number=user.phone_number, is_customer=True, ) return user except tortoise_exceptions.IntegrityError: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="username or email or phone number already exists.", )
none
1
2.366214
2
tests/functional/Hydro/BlobTest/CloudMassFraction.py
jmikeowen/Spheral
22
6629764
<reponame>jmikeowen/Spheral #------------------------------------------------------------------------------- # Measure the cloud mass fraction for the blob test, as in Figure 6 from # # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al. # Fundamental differences between SPH and grid methods. Monthly Notices of # the Royal Astronomical Society. 2007; 380(3):963-978. # doi:10.1111/j.1365-2966.2007.12183.x. # # This method is generalized for both the 2D and 3D tests. #------------------------------------------------------------------------------- from NodeHistory import NodeHistory import Spheral import mpi class CloudMassFraction(NodeHistory): #--------------------------------------------------------------------------- # Constructor #--------------------------------------------------------------------------- def __init__(self, r0, # initial blob radius rhoThreshold, # density cutoff epsThreshold, # specific thermal energy cutoff nodes, # blob NodeList filename): # file to write the results to self.r0 = r0 self.rho0 = rhoThreshold self.eps0 = epsThreshold NodeHistory.__init__(self, nodes, [], self.measureCloudFraction, filename, labels = ("mfrac","mass","volume")) # Check our dimensionality if isinstance(nodes, Spheral.NodeList2d): self.ndim = 2 elif isinstance(nodes, Spheral.NodeList3d): self.ndim = 3 else: raise RuntimeError, "What the heck is %s?" % nodes # Find the starting mass of the cloud. self.M0 = nodes.mass().sumElements() return #--------------------------------------------------------------------------- # Do our measurements. 
#--------------------------------------------------------------------------- def measureCloudFraction(self, nodes, indices): mass = nodes.mass() rho = nodes.massDensity() eps = nodes.specificThermalEnergy() msum, volsum = 0.0, 0.0 for i in xrange(nodes.numInternalNodes): if rho[i] > self.rho0 and eps[i] < self.eps0: msum += mass[i] volsum += mass[i]/rho[i] msum = mpi.allreduce(msum, mpi.SUM) volsum = mpi.allreduce(volsum, mpi.SUM) return msum/self.M0, msum, volsum
#------------------------------------------------------------------------------- # Measure the cloud mass fraction for the blob test, as in Figure 6 from # # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al. # Fundamental differences between SPH and grid methods. Monthly Notices of # the Royal Astronomical Society. 2007; 380(3):963-978. # doi:10.1111/j.1365-2966.2007.12183.x. # # This method is generalized for both the 2D and 3D tests. #------------------------------------------------------------------------------- from NodeHistory import NodeHistory import Spheral import mpi class CloudMassFraction(NodeHistory): #--------------------------------------------------------------------------- # Constructor #--------------------------------------------------------------------------- def __init__(self, r0, # initial blob radius rhoThreshold, # density cutoff epsThreshold, # specific thermal energy cutoff nodes, # blob NodeList filename): # file to write the results to self.r0 = r0 self.rho0 = rhoThreshold self.eps0 = epsThreshold NodeHistory.__init__(self, nodes, [], self.measureCloudFraction, filename, labels = ("mfrac","mass","volume")) # Check our dimensionality if isinstance(nodes, Spheral.NodeList2d): self.ndim = 2 elif isinstance(nodes, Spheral.NodeList3d): self.ndim = 3 else: raise RuntimeError, "What the heck is %s?" % nodes # Find the starting mass of the cloud. self.M0 = nodes.mass().sumElements() return #--------------------------------------------------------------------------- # Do our measurements. 
#--------------------------------------------------------------------------- def measureCloudFraction(self, nodes, indices): mass = nodes.mass() rho = nodes.massDensity() eps = nodes.specificThermalEnergy() msum, volsum = 0.0, 0.0 for i in xrange(nodes.numInternalNodes): if rho[i] > self.rho0 and eps[i] < self.eps0: msum += mass[i] volsum += mass[i]/rho[i] msum = mpi.allreduce(msum, mpi.SUM) volsum = mpi.allreduce(volsum, mpi.SUM) return msum/self.M0, msum, volsum
en
0.393308
#------------------------------------------------------------------------------- # Measure the cloud mass fraction for the blob test, as in Figure 6 from # # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al. # Fundamental differences between SPH and grid methods. Monthly Notices of # the Royal Astronomical Society. 2007; 380(3):963-978. # doi:10.1111/j.1365-2966.2007.12183.x. # # This method is generalized for both the 2D and 3D tests. #------------------------------------------------------------------------------- #--------------------------------------------------------------------------- # Constructor #--------------------------------------------------------------------------- # initial blob radius # density cutoff # specific thermal energy cutoff # blob NodeList # file to write the results to # Check our dimensionality # Find the starting mass of the cloud. #--------------------------------------------------------------------------- # Do our measurements. #---------------------------------------------------------------------------
2.095734
2
fwd9m/tensorflow/patch_bias_add.py
rym-khettab/determinism-DL
209
6629765
# Copyright 2020 NVIDIA Corporation. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ======================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops def _patch_bias_add(): _new_bias_add.__doc__ = tf.nn.bias_add.__doc__ tf.nn.bias_add = _new_bias_add # access via public API nn.bias_add = _new_bias_add # called from tf.keras.layers.convolutional.Conv nn_ops.bias_add = _new_bias_add # called from tests # The original, pre-patched method can be viewed at # https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/ops/nn_ops.py#L2628 # # This patched version of bias_add does not implement some of the error checks # provided by the original op. For more information, see the list of test cases # excluded from the testing of the patched op functionality. def _new_bias_add(value, bias, data_format=None, name=None): """ERROR: docstring should have been added programatically. 
""" with ops.name_scope(name, "BiasAdd", [value, bias]) as name: if data_format is not None: if data_format.startswith("NC"): data_format = "NCHW" elif data_format.startswith("N") and data_format.endswith("C"): data_format = "NHWC" else: raise ValueError("data_format must be of the form `N...C` or `NC...`") if not context.executing_eagerly(): value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") if data_format == 'NCHW': broadcast_shape_head = [1, array_ops.size(bias)] broadcast_shape_tail = array_ops.ones(array_ops.rank(value) - 2, dtype=dtypes.int32) broadcast_shape = array_ops.concat( [broadcast_shape_head, broadcast_shape_tail], 0) return math_ops.add( value, array_ops.reshape(bias, broadcast_shape), name=name) else: # data_format == 'NHWC' or data_format == None return math_ops.add(value, bias, name=name)
# Copyright 2020 NVIDIA Corporation. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ======================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops def _patch_bias_add(): _new_bias_add.__doc__ = tf.nn.bias_add.__doc__ tf.nn.bias_add = _new_bias_add # access via public API nn.bias_add = _new_bias_add # called from tf.keras.layers.convolutional.Conv nn_ops.bias_add = _new_bias_add # called from tests # The original, pre-patched method can be viewed at # https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/ops/nn_ops.py#L2628 # # This patched version of bias_add does not implement some of the error checks # provided by the original op. For more information, see the list of test cases # excluded from the testing of the patched op functionality. def _new_bias_add(value, bias, data_format=None, name=None): """ERROR: docstring should have been added programatically. 
""" with ops.name_scope(name, "BiasAdd", [value, bias]) as name: if data_format is not None: if data_format.startswith("NC"): data_format = "NCHW" elif data_format.startswith("N") and data_format.endswith("C"): data_format = "NHWC" else: raise ValueError("data_format must be of the form `N...C` or `NC...`") if not context.executing_eagerly(): value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") if data_format == 'NCHW': broadcast_shape_head = [1, array_ops.size(bias)] broadcast_shape_tail = array_ops.ones(array_ops.rank(value) - 2, dtype=dtypes.int32) broadcast_shape = array_ops.concat( [broadcast_shape_head, broadcast_shape_tail], 0) return math_ops.add( value, array_ops.reshape(bias, broadcast_shape), name=name) else: # data_format == 'NHWC' or data_format == None return math_ops.add(value, bias, name=name)
en
0.84202
# Copyright 2020 NVIDIA Corporation. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ======================================================================== # access via public API # called from tf.keras.layers.convolutional.Conv # called from tests # The original, pre-patched method can be viewed at # https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/ops/nn_ops.py#L2628 # # This patched version of bias_add does not implement some of the error checks # provided by the original op. For more information, see the list of test cases # excluded from the testing of the patched op functionality. ERROR: docstring should have been added programatically. # data_format == 'NHWC' or data_format == None
1.633925
2
conftest.py
risclog-solution/batou
34
6629766
<reponame>risclog-solution/batou pytest_plugins = "batou.fixtures"
pytest_plugins = "batou.fixtures"
none
1
0.914316
1
sdk/python/pulumi_azure_native/purview/v20210701/outputs.py
polivbr/pulumi-azure-native
0
6629767
<filename>sdk/python/pulumi_azure_native/purview/v20210701/outputs.py # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * __all__ = [ 'AccountPropertiesResponseEndpoints', 'AccountPropertiesResponseManagedResources', 'AccountResponseSku', 'CloudConnectorsResponse', 'IdentityResponse', 'PrivateEndpointConnectionResponse', 'PrivateEndpointResponse', 'PrivateLinkServiceConnectionStateResponse', 'TrackedResourceResponseSystemData', ] @pulumi.output_type class AccountPropertiesResponseEndpoints(dict): """ The URIs that are the public endpoints of the account. """ def __init__(__self__, *, catalog: str, guardian: str, scan: str): """ The URIs that are the public endpoints of the account. :param str catalog: Gets the catalog endpoint. :param str guardian: Gets the guardian endpoint. :param str scan: Gets the scan endpoint. """ pulumi.set(__self__, "catalog", catalog) pulumi.set(__self__, "guardian", guardian) pulumi.set(__self__, "scan", scan) @property @pulumi.getter def catalog(self) -> str: """ Gets the catalog endpoint. """ return pulumi.get(self, "catalog") @property @pulumi.getter def guardian(self) -> str: """ Gets the guardian endpoint. """ return pulumi.get(self, "guardian") @property @pulumi.getter def scan(self) -> str: """ Gets the scan endpoint. """ return pulumi.get(self, "scan") @pulumi.output_type class AccountPropertiesResponseManagedResources(dict): """ Gets the resource identifiers of the managed resources. 
""" @staticmethod def __key_warning(key: str): suggest = None if key == "eventHubNamespace": suggest = "event_hub_namespace" elif key == "resourceGroup": suggest = "resource_group" elif key == "storageAccount": suggest = "storage_account" if suggest: pulumi.log.warn(f"Key '{key}' not found in AccountPropertiesResponseManagedResources. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: AccountPropertiesResponseManagedResources.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: AccountPropertiesResponseManagedResources.__key_warning(key) return super().get(key, default) def __init__(__self__, *, event_hub_namespace: str, resource_group: str, storage_account: str): """ Gets the resource identifiers of the managed resources. :param str event_hub_namespace: Gets the managed event hub namespace resource identifier. :param str resource_group: Gets the managed resource group resource identifier. This resource group will host resource dependencies for the account. :param str storage_account: Gets the managed storage account resource identifier. """ pulumi.set(__self__, "event_hub_namespace", event_hub_namespace) pulumi.set(__self__, "resource_group", resource_group) pulumi.set(__self__, "storage_account", storage_account) @property @pulumi.getter(name="eventHubNamespace") def event_hub_namespace(self) -> str: """ Gets the managed event hub namespace resource identifier. """ return pulumi.get(self, "event_hub_namespace") @property @pulumi.getter(name="resourceGroup") def resource_group(self) -> str: """ Gets the managed resource group resource identifier. This resource group will host resource dependencies for the account. """ return pulumi.get(self, "resource_group") @property @pulumi.getter(name="storageAccount") def storage_account(self) -> str: """ Gets the managed storage account resource identifier. 
""" return pulumi.get(self, "storage_account") @pulumi.output_type class AccountResponseSku(dict): """ Gets or sets the Sku. """ def __init__(__self__, *, capacity: Optional[int] = None, name: Optional[str] = None): """ Gets or sets the Sku. :param int capacity: Gets or sets the sku capacity. :param str name: Gets or sets the sku name. """ if capacity is not None: pulumi.set(__self__, "capacity", capacity) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def capacity(self) -> Optional[int]: """ Gets or sets the sku capacity. """ return pulumi.get(self, "capacity") @property @pulumi.getter def name(self) -> Optional[str]: """ Gets or sets the sku name. """ return pulumi.get(self, "name") @pulumi.output_type class CloudConnectorsResponse(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "awsExternalId": suggest = "aws_external_id" if suggest: pulumi.log.warn(f"Key '{key}' not found in CloudConnectorsResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: CloudConnectorsResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: CloudConnectorsResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, aws_external_id: str): """ :param str aws_external_id: AWS external identifier. Configured in AWS to allow use of the role arn used for scanning """ pulumi.set(__self__, "aws_external_id", aws_external_id) @property @pulumi.getter(name="awsExternalId") def aws_external_id(self) -> str: """ AWS external identifier. 
Configured in AWS to allow use of the role arn used for scanning """ return pulumi.get(self, "aws_external_id") @pulumi.output_type class IdentityResponse(dict): """ The Managed Identity of the resource """ @staticmethod def __key_warning(key: str): suggest = None if key == "principalId": suggest = "principal_id" elif key == "tenantId": suggest = "tenant_id" if suggest: pulumi.log.warn(f"Key '{key}' not found in IdentityResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: IdentityResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: IdentityResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, principal_id: str, tenant_id: str, type: Optional[str] = None): """ The Managed Identity of the resource :param str principal_id: Service principal object Id :param str tenant_id: Tenant Id :param str type: Identity Type """ pulumi.set(__self__, "principal_id", principal_id) pulumi.set(__self__, "tenant_id", tenant_id) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="principalId") def principal_id(self) -> str: """ Service principal object Id """ return pulumi.get(self, "principal_id") @property @pulumi.getter(name="tenantId") def tenant_id(self) -> str: """ Tenant Id """ return pulumi.get(self, "tenant_id") @property @pulumi.getter def type(self) -> Optional[str]: """ Identity Type """ return pulumi.get(self, "type") @pulumi.output_type class PrivateEndpointConnectionResponse(dict): """ A private endpoint connection class. """ @staticmethod def __key_warning(key: str): suggest = None if key == "provisioningState": suggest = "provisioning_state" elif key == "privateEndpoint": suggest = "private_endpoint" elif key == "privateLinkServiceConnectionState": suggest = "private_link_service_connection_state" if suggest: pulumi.log.warn(f"Key '{key}' not found in PrivateEndpointConnectionResponse. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: PrivateEndpointConnectionResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: PrivateEndpointConnectionResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, id: str, name: str, provisioning_state: str, type: str, private_endpoint: Optional['outputs.PrivateEndpointResponse'] = None, private_link_service_connection_state: Optional['outputs.PrivateLinkServiceConnectionStateResponse'] = None): """ A private endpoint connection class. :param str id: Gets or sets the identifier. :param str name: Gets or sets the name. :param str provisioning_state: The provisioning state. :param str type: Gets or sets the type. :param 'PrivateEndpointResponse' private_endpoint: The private endpoint information. :param 'PrivateLinkServiceConnectionStateResponse' private_link_service_connection_state: The private link service connection state. """ pulumi.set(__self__, "id", id) pulumi.set(__self__, "name", name) pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "type", type) if private_endpoint is not None: pulumi.set(__self__, "private_endpoint", private_endpoint) if private_link_service_connection_state is not None: pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state) @property @pulumi.getter def id(self) -> str: """ Gets or sets the identifier. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ Gets or sets the name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def type(self) -> str: """ Gets or sets the type. 
""" return pulumi.get(self, "type") @property @pulumi.getter(name="privateEndpoint") def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']: """ The private endpoint information. """ return pulumi.get(self, "private_endpoint") @property @pulumi.getter(name="privateLinkServiceConnectionState") def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']: """ The private link service connection state. """ return pulumi.get(self, "private_link_service_connection_state") @pulumi.output_type class PrivateEndpointResponse(dict): """ A private endpoint class. """ def __init__(__self__, *, id: Optional[str] = None): """ A private endpoint class. :param str id: The private endpoint identifier. """ if id is not None: pulumi.set(__self__, "id", id) @property @pulumi.getter def id(self) -> Optional[str]: """ The private endpoint identifier. """ return pulumi.get(self, "id") @pulumi.output_type class PrivateLinkServiceConnectionStateResponse(dict): """ The private link service connection state. """ @staticmethod def __key_warning(key: str): suggest = None if key == "actionsRequired": suggest = "actions_required" if suggest: pulumi.log.warn(f"Key '{key}' not found in PrivateLinkServiceConnectionStateResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: PrivateLinkServiceConnectionStateResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: PrivateLinkServiceConnectionStateResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, actions_required: Optional[str] = None, description: Optional[str] = None, status: Optional[str] = None): """ The private link service connection state. :param str actions_required: The required actions. :param str description: The description. :param str status: The status. 
""" if actions_required is not None: pulumi.set(__self__, "actions_required", actions_required) if description is not None: pulumi.set(__self__, "description", description) if status is not None: pulumi.set(__self__, "status", status) @property @pulumi.getter(name="actionsRequired") def actions_required(self) -> Optional[str]: """ The required actions. """ return pulumi.get(self, "actions_required") @property @pulumi.getter def description(self) -> Optional[str]: """ The description. """ return pulumi.get(self, "description") @property @pulumi.getter def status(self) -> Optional[str]: """ The status. """ return pulumi.get(self, "status") @pulumi.output_type class TrackedResourceResponseSystemData(dict): """ Metadata pertaining to creation and last modification of the resource. """ @staticmethod def __key_warning(key: str): suggest = None if key == "createdAt": suggest = "created_at" elif key == "createdBy": suggest = "created_by" elif key == "createdByType": suggest = "created_by_type" elif key == "lastModifiedAt": suggest = "last_modified_at" elif key == "lastModifiedBy": suggest = "last_modified_by" elif key == "lastModifiedByType": suggest = "last_modified_by_type" if suggest: pulumi.log.warn(f"Key '{key}' not found in TrackedResourceResponseSystemData. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TrackedResourceResponseSystemData.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TrackedResourceResponseSystemData.__key_warning(key) return super().get(key, default) def __init__(__self__, *, created_at: str, created_by: str, created_by_type: str, last_modified_at: str, last_modified_by: str, last_modified_by_type: str): """ Metadata pertaining to creation and last modification of the resource. :param str created_at: The timestamp of resource creation (UTC). :param str created_by: The identity that created the resource. 
:param str created_by_type: The type of identity that created the resource. :param str last_modified_at: The timestamp of the last modification the resource (UTC). :param str last_modified_by: The identity that last modified the resource. :param str last_modified_by_type: The type of identity that last modified the resource. """ pulumi.set(__self__, "created_at", created_at) pulumi.set(__self__, "created_by", created_by) pulumi.set(__self__, "created_by_type", created_by_type) pulumi.set(__self__, "last_modified_at", last_modified_at) pulumi.set(__self__, "last_modified_by", last_modified_by) pulumi.set(__self__, "last_modified_by_type", last_modified_by_type) @property @pulumi.getter(name="createdAt") def created_at(self) -> str: """ The timestamp of resource creation (UTC). """ return pulumi.get(self, "created_at") @property @pulumi.getter(name="createdBy") def created_by(self) -> str: """ The identity that created the resource. """ return pulumi.get(self, "created_by") @property @pulumi.getter(name="createdByType") def created_by_type(self) -> str: """ The type of identity that created the resource. """ return pulumi.get(self, "created_by_type") @property @pulumi.getter(name="lastModifiedAt") def last_modified_at(self) -> str: """ The timestamp of the last modification the resource (UTC). """ return pulumi.get(self, "last_modified_at") @property @pulumi.getter(name="lastModifiedBy") def last_modified_by(self) -> str: """ The identity that last modified the resource. """ return pulumi.get(self, "last_modified_by") @property @pulumi.getter(name="lastModifiedByType") def last_modified_by_type(self) -> str: """ The type of identity that last modified the resource. """ return pulumi.get(self, "last_modified_by_type")
<filename>sdk/python/pulumi_azure_native/purview/v20210701/outputs.py # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * __all__ = [ 'AccountPropertiesResponseEndpoints', 'AccountPropertiesResponseManagedResources', 'AccountResponseSku', 'CloudConnectorsResponse', 'IdentityResponse', 'PrivateEndpointConnectionResponse', 'PrivateEndpointResponse', 'PrivateLinkServiceConnectionStateResponse', 'TrackedResourceResponseSystemData', ] @pulumi.output_type class AccountPropertiesResponseEndpoints(dict): """ The URIs that are the public endpoints of the account. """ def __init__(__self__, *, catalog: str, guardian: str, scan: str): """ The URIs that are the public endpoints of the account. :param str catalog: Gets the catalog endpoint. :param str guardian: Gets the guardian endpoint. :param str scan: Gets the scan endpoint. """ pulumi.set(__self__, "catalog", catalog) pulumi.set(__self__, "guardian", guardian) pulumi.set(__self__, "scan", scan) @property @pulumi.getter def catalog(self) -> str: """ Gets the catalog endpoint. """ return pulumi.get(self, "catalog") @property @pulumi.getter def guardian(self) -> str: """ Gets the guardian endpoint. """ return pulumi.get(self, "guardian") @property @pulumi.getter def scan(self) -> str: """ Gets the scan endpoint. """ return pulumi.get(self, "scan") @pulumi.output_type class AccountPropertiesResponseManagedResources(dict): """ Gets the resource identifiers of the managed resources. 
""" @staticmethod def __key_warning(key: str): suggest = None if key == "eventHubNamespace": suggest = "event_hub_namespace" elif key == "resourceGroup": suggest = "resource_group" elif key == "storageAccount": suggest = "storage_account" if suggest: pulumi.log.warn(f"Key '{key}' not found in AccountPropertiesResponseManagedResources. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: AccountPropertiesResponseManagedResources.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: AccountPropertiesResponseManagedResources.__key_warning(key) return super().get(key, default) def __init__(__self__, *, event_hub_namespace: str, resource_group: str, storage_account: str): """ Gets the resource identifiers of the managed resources. :param str event_hub_namespace: Gets the managed event hub namespace resource identifier. :param str resource_group: Gets the managed resource group resource identifier. This resource group will host resource dependencies for the account. :param str storage_account: Gets the managed storage account resource identifier. """ pulumi.set(__self__, "event_hub_namespace", event_hub_namespace) pulumi.set(__self__, "resource_group", resource_group) pulumi.set(__self__, "storage_account", storage_account) @property @pulumi.getter(name="eventHubNamespace") def event_hub_namespace(self) -> str: """ Gets the managed event hub namespace resource identifier. """ return pulumi.get(self, "event_hub_namespace") @property @pulumi.getter(name="resourceGroup") def resource_group(self) -> str: """ Gets the managed resource group resource identifier. This resource group will host resource dependencies for the account. """ return pulumi.get(self, "resource_group") @property @pulumi.getter(name="storageAccount") def storage_account(self) -> str: """ Gets the managed storage account resource identifier. 
""" return pulumi.get(self, "storage_account") @pulumi.output_type class AccountResponseSku(dict): """ Gets or sets the Sku. """ def __init__(__self__, *, capacity: Optional[int] = None, name: Optional[str] = None): """ Gets or sets the Sku. :param int capacity: Gets or sets the sku capacity. :param str name: Gets or sets the sku name. """ if capacity is not None: pulumi.set(__self__, "capacity", capacity) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def capacity(self) -> Optional[int]: """ Gets or sets the sku capacity. """ return pulumi.get(self, "capacity") @property @pulumi.getter def name(self) -> Optional[str]: """ Gets or sets the sku name. """ return pulumi.get(self, "name") @pulumi.output_type class CloudConnectorsResponse(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "awsExternalId": suggest = "aws_external_id" if suggest: pulumi.log.warn(f"Key '{key}' not found in CloudConnectorsResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: CloudConnectorsResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: CloudConnectorsResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, aws_external_id: str): """ :param str aws_external_id: AWS external identifier. Configured in AWS to allow use of the role arn used for scanning """ pulumi.set(__self__, "aws_external_id", aws_external_id) @property @pulumi.getter(name="awsExternalId") def aws_external_id(self) -> str: """ AWS external identifier. 
Configured in AWS to allow use of the role arn used for scanning """ return pulumi.get(self, "aws_external_id") @pulumi.output_type class IdentityResponse(dict): """ The Managed Identity of the resource """ @staticmethod def __key_warning(key: str): suggest = None if key == "principalId": suggest = "principal_id" elif key == "tenantId": suggest = "tenant_id" if suggest: pulumi.log.warn(f"Key '{key}' not found in IdentityResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: IdentityResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: IdentityResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, principal_id: str, tenant_id: str, type: Optional[str] = None): """ The Managed Identity of the resource :param str principal_id: Service principal object Id :param str tenant_id: Tenant Id :param str type: Identity Type """ pulumi.set(__self__, "principal_id", principal_id) pulumi.set(__self__, "tenant_id", tenant_id) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="principalId") def principal_id(self) -> str: """ Service principal object Id """ return pulumi.get(self, "principal_id") @property @pulumi.getter(name="tenantId") def tenant_id(self) -> str: """ Tenant Id """ return pulumi.get(self, "tenant_id") @property @pulumi.getter def type(self) -> Optional[str]: """ Identity Type """ return pulumi.get(self, "type") @pulumi.output_type class PrivateEndpointConnectionResponse(dict): """ A private endpoint connection class. """ @staticmethod def __key_warning(key: str): suggest = None if key == "provisioningState": suggest = "provisioning_state" elif key == "privateEndpoint": suggest = "private_endpoint" elif key == "privateLinkServiceConnectionState": suggest = "private_link_service_connection_state" if suggest: pulumi.log.warn(f"Key '{key}' not found in PrivateEndpointConnectionResponse. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: PrivateEndpointConnectionResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: PrivateEndpointConnectionResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, id: str, name: str, provisioning_state: str, type: str, private_endpoint: Optional['outputs.PrivateEndpointResponse'] = None, private_link_service_connection_state: Optional['outputs.PrivateLinkServiceConnectionStateResponse'] = None): """ A private endpoint connection class. :param str id: Gets or sets the identifier. :param str name: Gets or sets the name. :param str provisioning_state: The provisioning state. :param str type: Gets or sets the type. :param 'PrivateEndpointResponse' private_endpoint: The private endpoint information. :param 'PrivateLinkServiceConnectionStateResponse' private_link_service_connection_state: The private link service connection state. """ pulumi.set(__self__, "id", id) pulumi.set(__self__, "name", name) pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "type", type) if private_endpoint is not None: pulumi.set(__self__, "private_endpoint", private_endpoint) if private_link_service_connection_state is not None: pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state) @property @pulumi.getter def id(self) -> str: """ Gets or sets the identifier. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ Gets or sets the name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def type(self) -> str: """ Gets or sets the type. 
""" return pulumi.get(self, "type") @property @pulumi.getter(name="privateEndpoint") def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']: """ The private endpoint information. """ return pulumi.get(self, "private_endpoint") @property @pulumi.getter(name="privateLinkServiceConnectionState") def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']: """ The private link service connection state. """ return pulumi.get(self, "private_link_service_connection_state") @pulumi.output_type class PrivateEndpointResponse(dict): """ A private endpoint class. """ def __init__(__self__, *, id: Optional[str] = None): """ A private endpoint class. :param str id: The private endpoint identifier. """ if id is not None: pulumi.set(__self__, "id", id) @property @pulumi.getter def id(self) -> Optional[str]: """ The private endpoint identifier. """ return pulumi.get(self, "id") @pulumi.output_type class PrivateLinkServiceConnectionStateResponse(dict): """ The private link service connection state. """ @staticmethod def __key_warning(key: str): suggest = None if key == "actionsRequired": suggest = "actions_required" if suggest: pulumi.log.warn(f"Key '{key}' not found in PrivateLinkServiceConnectionStateResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: PrivateLinkServiceConnectionStateResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: PrivateLinkServiceConnectionStateResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, actions_required: Optional[str] = None, description: Optional[str] = None, status: Optional[str] = None): """ The private link service connection state. :param str actions_required: The required actions. :param str description: The description. :param str status: The status. 
""" if actions_required is not None: pulumi.set(__self__, "actions_required", actions_required) if description is not None: pulumi.set(__self__, "description", description) if status is not None: pulumi.set(__self__, "status", status) @property @pulumi.getter(name="actionsRequired") def actions_required(self) -> Optional[str]: """ The required actions. """ return pulumi.get(self, "actions_required") @property @pulumi.getter def description(self) -> Optional[str]: """ The description. """ return pulumi.get(self, "description") @property @pulumi.getter def status(self) -> Optional[str]: """ The status. """ return pulumi.get(self, "status") @pulumi.output_type class TrackedResourceResponseSystemData(dict): """ Metadata pertaining to creation and last modification of the resource. """ @staticmethod def __key_warning(key: str): suggest = None if key == "createdAt": suggest = "created_at" elif key == "createdBy": suggest = "created_by" elif key == "createdByType": suggest = "created_by_type" elif key == "lastModifiedAt": suggest = "last_modified_at" elif key == "lastModifiedBy": suggest = "last_modified_by" elif key == "lastModifiedByType": suggest = "last_modified_by_type" if suggest: pulumi.log.warn(f"Key '{key}' not found in TrackedResourceResponseSystemData. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TrackedResourceResponseSystemData.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TrackedResourceResponseSystemData.__key_warning(key) return super().get(key, default) def __init__(__self__, *, created_at: str, created_by: str, created_by_type: str, last_modified_at: str, last_modified_by: str, last_modified_by_type: str): """ Metadata pertaining to creation and last modification of the resource. :param str created_at: The timestamp of resource creation (UTC). :param str created_by: The identity that created the resource. 
:param str created_by_type: The type of identity that created the resource. :param str last_modified_at: The timestamp of the last modification the resource (UTC). :param str last_modified_by: The identity that last modified the resource. :param str last_modified_by_type: The type of identity that last modified the resource. """ pulumi.set(__self__, "created_at", created_at) pulumi.set(__self__, "created_by", created_by) pulumi.set(__self__, "created_by_type", created_by_type) pulumi.set(__self__, "last_modified_at", last_modified_at) pulumi.set(__self__, "last_modified_by", last_modified_by) pulumi.set(__self__, "last_modified_by_type", last_modified_by_type) @property @pulumi.getter(name="createdAt") def created_at(self) -> str: """ The timestamp of resource creation (UTC). """ return pulumi.get(self, "created_at") @property @pulumi.getter(name="createdBy") def created_by(self) -> str: """ The identity that created the resource. """ return pulumi.get(self, "created_by") @property @pulumi.getter(name="createdByType") def created_by_type(self) -> str: """ The type of identity that created the resource. """ return pulumi.get(self, "created_by_type") @property @pulumi.getter(name="lastModifiedAt") def last_modified_at(self) -> str: """ The timestamp of the last modification the resource (UTC). """ return pulumi.get(self, "last_modified_at") @property @pulumi.getter(name="lastModifiedBy") def last_modified_by(self) -> str: """ The identity that last modified the resource. """ return pulumi.get(self, "last_modified_by") @property @pulumi.getter(name="lastModifiedByType") def last_modified_by_type(self) -> str: """ The type of identity that last modified the resource. """ return pulumi.get(self, "last_modified_by_type")
en
0.729838
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The URIs that are the public endpoints of the account. The URIs that are the public endpoints of the account. :param str catalog: Gets the catalog endpoint. :param str guardian: Gets the guardian endpoint. :param str scan: Gets the scan endpoint. Gets the catalog endpoint. Gets the guardian endpoint. Gets the scan endpoint. Gets the resource identifiers of the managed resources. Gets the resource identifiers of the managed resources. :param str event_hub_namespace: Gets the managed event hub namespace resource identifier. :param str resource_group: Gets the managed resource group resource identifier. This resource group will host resource dependencies for the account. :param str storage_account: Gets the managed storage account resource identifier. Gets the managed event hub namespace resource identifier. Gets the managed resource group resource identifier. This resource group will host resource dependencies for the account. Gets the managed storage account resource identifier. Gets or sets the Sku. Gets or sets the Sku. :param int capacity: Gets or sets the sku capacity. :param str name: Gets or sets the sku name. Gets or sets the sku capacity. Gets or sets the sku name. :param str aws_external_id: AWS external identifier. Configured in AWS to allow use of the role arn used for scanning AWS external identifier. Configured in AWS to allow use of the role arn used for scanning The Managed Identity of the resource The Managed Identity of the resource :param str principal_id: Service principal object Id :param str tenant_id: Tenant Id :param str type: Identity Type Service principal object Id Tenant Id Identity Type A private endpoint connection class. A private endpoint connection class. :param str id: Gets or sets the identifier. :param str name: Gets or sets the name. 
:param str provisioning_state: The provisioning state. :param str type: Gets or sets the type. :param 'PrivateEndpointResponse' private_endpoint: The private endpoint information. :param 'PrivateLinkServiceConnectionStateResponse' private_link_service_connection_state: The private link service connection state. Gets or sets the identifier. Gets or sets the name. The provisioning state. Gets or sets the type. The private endpoint information. The private link service connection state. A private endpoint class. A private endpoint class. :param str id: The private endpoint identifier. The private endpoint identifier. The private link service connection state. The private link service connection state. :param str actions_required: The required actions. :param str description: The description. :param str status: The status. The required actions. The description. The status. Metadata pertaining to creation and last modification of the resource. Metadata pertaining to creation and last modification of the resource. :param str created_at: The timestamp of resource creation (UTC). :param str created_by: The identity that created the resource. :param str created_by_type: The type of identity that created the resource. :param str last_modified_at: The timestamp of the last modification the resource (UTC). :param str last_modified_by: The identity that last modified the resource. :param str last_modified_by_type: The type of identity that last modified the resource. The timestamp of resource creation (UTC). The identity that created the resource. The type of identity that created the resource. The timestamp of the last modification the resource (UTC). The identity that last modified the resource. The type of identity that last modified the resource.
1.818208
2
src/sardana/macroserver/test/test_msrecordermanager.py
marc2332/sardana
43
6629768
#!/usr/bin/env python

##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################

"""Tests for the MacroServer recorder manager: recorder path handling,
recorder class discovery and ordered-path precedence rules."""

import os
import unittest

from taurus.test import insertTest

from sardana.macroserver.macroserver import MacroServer
from sardana.macroserver.scan.recorder import DataRecorder
from sardana.macroserver.scan.recorder.storage import FileRecorder,\
    BaseFileRecorder

_TEST_DIR = os.path.dirname(os.path.abspath(__file__))
_FAKE_RECORDER_DIR = os.path.join(_TEST_DIR, 'res')


@insertTest(helper_name='getRecorderClass', klass_name="JsonRecorder")
@insertTest(helper_name='getRecorderClass', klass_name="FIO_FileRecorder")
@insertTest(helper_name='getRecorderClass', klass_name="FakeScanRecorder",
            extra_paths=[_FAKE_RECORDER_DIR])
@insertTest(helper_name='getRecorderClasses', filter=DataRecorder,
            extra_paths=[_FAKE_RECORDER_DIR], extra_recorders=1)
@insertTest(helper_name='getRecorderClasses',
            extra_paths=[_FAKE_RECORDER_DIR], extra_recorders=1)
@insertTest(helper_name='getRecorderPath',
            recorder_path=["/tmp/foo", "#/tmp/foo2"], expected_num_path=2)
@insertTest(helper_name='getRecorderPath',
            recorder_path=["/tmp/foo" + os.pathsep + "/tmp/foo2"],
            expected_num_path=3)
@insertTest(helper_name='getRecorderPath', recorder_path=["/tmp/foo"],
            expected_num_path=2)
@insertTest(helper_name='getRecorderPath')
class RecorderManagerTest(unittest.TestCase):

    # Just a hardcoded fullname used to create a MacroServer instance.
    # This macroserver does not need to be defined (registered) anywhere.
    ms_fullname = "macroserver/demo1/1"

    def setUp(self):
        name = self.ms_fullname.split("/")[1]
        self._macro_server = MacroServer(self.ms_fullname, name,
                                         macro_path=[], recorder_path=[])
        self.manager = self._macro_server.recorder_manager

    def tearDown(self):
        pass

    def _updateRecorderManager(self, recorder_path):
        """Point the sardana recorder manager at the given recorder path(s)."""
        self.manager.setRecorderPath(recorder_path)

    def getRecorderPath(self, recorder_path=None, expected_num_path=1):
        """Helper that checks the number of recorder paths that are read.

        The number of read paths should be ``len(recorder_path) + 1``
        (the built-in recorder path is always present).
        """
        # Avoid a shared mutable default argument.
        if recorder_path is None:
            recorder_path = []
        # NOTE: the original guard was `recorder_path is not []`, an identity
        # comparison against a fresh literal which is always True, so the
        # manager was in fact updated unconditionally; keep that behavior.
        self._updateRecorderManager(recorder_path)
        # Get the list of recorder path(s)
        paths = self.manager.getRecorderPath()
        num_paths = len(paths)
        msg = "The number of paths do not concur, read %d, expected %d" %\
              (num_paths, expected_num_path)
        self.assertEqual(num_paths, expected_num_path, msg)

    def getRecorderClasses(self, filter=None, extra_paths=None,
                           extra_recorders=0):
        """Helper that tests the getRecorderClasses method of the recorder
        manager: adding ``extra_paths`` must add ``extra_recorders`` classes
        on top of the default set.
        """
        if filter is None:
            filter = DataRecorder
        # Use default recorders paths
        self.manager.setRecorderPath([])
        default_recorder_klass = self.manager.getRecorderClasses(filter)
        # Add extra recorders paths
        if extra_paths is not None:
            self.manager.setRecorderPath(extra_paths)
        recorder_klass = self.manager.getRecorderClasses(filter)
        n_default_recorders = len(default_recorder_klass)
        n_recorders = len(recorder_klass)
        total_recorders = n_default_recorders + extra_recorders
        msg = "Number of recorder classes do not concur, expected %d, get %d" %\
              (total_recorders, n_recorders)
        self.assertEqual(total_recorders, n_recorders, msg)

    def getRecorderClass(self, klass_name, extra_paths=None):
        """Helper that tests the getRecorderClass method of the recorder
        manager: the class must be found, be a DataRecorder subclass and
        carry the requested name.
        """
        # `extra_paths or []` also avoids the original mutable default.
        self.manager.setRecorderPath(extra_paths or [])
        klass = self.manager.getRecorderClass(klass_name)
        msg = "Recorder manager did not find the class %s" % (klass_name,)
        self.assertNotEqual(klass, None, msg)
        _name = klass.__name__
        msg = "The class %s is not subclass of DataRecorder" % (_name,)
        self.assertTrue(issubclass(klass, DataRecorder), msg)
        msg = ("The class name given by the recorder manager is different. "
               "Expected %s, got %s" % (klass_name, _name))
        self.assertEqual(_name, klass_name, msg)

    def test_SameClassNames(self):
        """Test whether ordered path precedence is maintained in case of
        different recorder classes with the same name located in different
        paths: the class found in the first path must win.
        """
        path1 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path1')
        path2 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path2')
        path3 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path3')
        # set three paths containing recorders with the same class names
        recorder_path = [path3, path1, path2]
        self._updateRecorderManager(recorder_path)
        klass = self.manager.getRecorderMetaClass('FakeScanRecorder')
        # retrieve path to the recorder library
        path = os.sep.join(klass.lib.full_name.split(os.sep)[:-1])
        msg = 'Ordered path precedence is not maintained by RecorderManager'
        self.assertEqual(path3, path, msg)

    def test_SameFormats(self):
        """Test whether ordered path precedence is maintained in case of
        different recorder classes supporting the same format located in
        different paths.
        """
        path1 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path1')
        path2 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path2')
        path3 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path3')
        recorder_path = [path3, path1, path2]
        # set three paths containing recorders of the same format
        self._updateRecorderManager(recorder_path)
        klasses = self.manager.getRecorderMetaClasses(filter=BaseFileRecorder,
                                                      extension='.spec')
        klass = list(klasses.values())[0]
        # retrieve path to the recorder library
        path = os.sep.join(klass.lib.full_name.split(os.sep)[:-1])
        msg = 'Ordered path precedence is not maintained by RecorderManager'
        self.assertEqual(path3, path, msg)

    def test_ExternalVsBuiltinPrecedence(self):
        """Test that external recorders take priority over the built-in
        ones when both provide a class with the same name.
        """
        external_path = os.path.join(_TEST_DIR, 'res', 'recorders',
                                     'pathexternal')
        # set a path containing a recorder shadowing a built-in class name
        recorder_path = [external_path]
        self._updateRecorderManager(recorder_path)
        klass = self.manager.getRecorderMetaClass('SPEC_FileRecorder')
        # retrieve path to the recorder library
        path = os.sep.join(klass.lib.full_name.split(os.sep)[:-1])
        msg = 'Wrong precedence of recorder paths'
        self.assertEqual(path, external_path, msg)
#!/usr/bin/env python ############################################################################## ## # This file is part of Sardana ## # http://www.sardana-controls.org/ ## # Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain ## # Sardana is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. ## # Sardana is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. ## # You should have received a copy of the GNU Lesser General Public License # along with Sardana. If not, see <http://www.gnu.org/licenses/>. ## ############################################################################## import os import unittest from taurus.test import insertTest from sardana.macroserver.macroserver import MacroServer from sardana.macroserver.scan.recorder import DataRecorder from sardana.macroserver.scan.recorder.storage import FileRecorder,\ BaseFileRecorder _TEST_DIR = os.path.dirname(os.path.abspath(__file__)) _FAKE_RECORDER_DIR = os.path.join(_TEST_DIR, 'res') @insertTest(helper_name='getRecorderClass', klass_name="JsonRecorder") @insertTest(helper_name='getRecorderClass', klass_name="FIO_FileRecorder") @insertTest(helper_name='getRecorderClass', klass_name="FakeScanRecorder", extra_paths=[_FAKE_RECORDER_DIR]) @insertTest(helper_name='getRecorderClasses', filter=DataRecorder, extra_paths=[_FAKE_RECORDER_DIR], extra_recorders=1) @insertTest(helper_name='getRecorderClasses', extra_paths=[_FAKE_RECORDER_DIR], extra_recorders=1) @insertTest(helper_name='getRecorderPath', recorder_path=["/tmp/foo", "#/tmp/foo2"], expected_num_path=2) @insertTest(helper_name='getRecorderPath', recorder_path=["/tmp/foo" + os.pathsep + 
"/tmp/foo2"], expected_num_path=3) @insertTest(helper_name='getRecorderPath', recorder_path=["/tmp/foo"], expected_num_path=2) @insertTest(helper_name='getRecorderPath') class RecorderManagerTest(unittest.TestCase): # Just an hardcode fullname for create an instance of MacroServer. # This macroserver does not need to be defined. ms_fullname = "macroserver/demo1/1" def setUp(self): name = self.ms_fullname.split("/")[1] self._macro_server = MacroServer(self.ms_fullname, name, macro_path=[], recorder_path=[]) self.manager = self._macro_server.recorder_manager def tearDown(self): pass def _updateRecorderManager(self, recorder_path): """Helper for update the sardana recorder manager """ self.manager.setRecorderPath(recorder_path) def getRecorderPath(self, recorder_path=[], expected_num_path=1): """Helper for test the number of reading recorder paths. The number of reading path sould be len(recorder_path) + 1 """ if recorder_path is not []: self._updateRecorderManager(recorder_path) # Get the list of recorder path(s) paths = self.manager.getRecorderPath() num_paths = len(paths) msg = "The number of paths do not concur, read %d, expected %d" %\ (num_paths, expected_num_path) self.assertEqual(num_paths, expected_num_path, msg) def getRecorderClasses(self, filter=None, extra_paths=None, extra_recorders=0): """Helper for test getRecorderClasses method of the record Manager. 
""" if filter is None: filter = DataRecorder # Use default recorders paths self.manager.setRecorderPath([]) default_recorder_klass = self.manager.getRecorderClasses(filter) # Add extra recorders paths if extra_paths is not None: self.manager.setRecorderPath(extra_paths) recorder_klass = self.manager.getRecorderClasses(filter) n_default_recorders = len(default_recorder_klass) n_recorders = len(recorder_klass) total_recorders = n_default_recorders + extra_recorders msg = "Number of recorder classes do not concur, expected %d, get %d" %\ (total_recorders, n_recorders) self.assertEqual(total_recorders, n_recorders, msg) def getRecorderClass(self, klass_name, extra_paths=[]): """Helper for test getRecorderClass method of the record Manager. """ self.manager.setRecorderPath(extra_paths) klass = self.manager.getRecorderClass(klass_name) msg = "Recoder manager does not found the class %s" % (klass_name) self.assertNotEqual(klass, None, msg) _name = klass.__name__ msg = "The class %s is not subclass of DataRecorder" % (_name) self.assertTrue(issubclass(klass, DataRecorder), msg) msg = "The class name giveb by the recorder manager is different." +\ "Expected %s, get %s" % (klass_name, _name) self.assertEqual(_name, klass_name, msg) def test_SameClassNames(self): """Test whether ordered path precedence is maintained in case of different recorder classes with the same name located in different paths. 
""" path1 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path1') path2 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path2') path3 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path3') # set three paths containing recorders with the same class names recorder_path = [path3, path1, path2] self._updateRecorderManager(recorder_path) klass = self.manager.getRecorderMetaClass('FakeScanRecorder') # retrieve path to the recorder library path = os.sep.join(klass.lib.full_name.split(os.sep)[:-1]) msg = 'Ordered path precedence is not maintained by RecorderManager' self.assertEqual(path3, path, msg) def test_SameFormats(self): """Test whether ordered path precedence is maintained in case of different recorder classes supporting the same format located in different paths. """ path1 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path1') path2 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path2') path3 = os.path.join(_TEST_DIR, 'res', 'recorders', 'path3') recorder_path = [path3, path1, path2] # set three paths containing recorders of the same format self._updateRecorderManager(recorder_path) klasses = self.manager.getRecorderMetaClasses(filter=BaseFileRecorder, extension='.spec') klass = list(klasses.values())[0] # retrieve path to the recorder library path = os.sep.join(klass.lib.full_name.split(os.sep)[:-1]) msg = 'Ordered path precedence is not maintained by RecorderManager' self.assertEqual(path3, path, msg) def test_ExternalVsBuiltinPrecedence(self): """Test if external recorders are of higher priority than the built-in) """ external_path = os.path.join(_TEST_DIR, 'res', 'recorders', 'pathexternal') # set three paths containing recorders with the same class names recorder_path = [external_path] self._updateRecorderManager(recorder_path) klass = self.manager.getRecorderMetaClass('SPEC_FileRecorder') # retrieve path to the recorder library path = os.sep.join(klass.lib.full_name.split(os.sep)[:-1]) msg = 'Wrong precedence of recorder paths' self.assertEqual(path, 
external_path, msg)
en
0.77683
#!/usr/bin/env python ############################################################################## ## # This file is part of Sardana ## # http://www.sardana-controls.org/ ## # Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain ## # Sardana is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. ## # Sardana is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. ## # You should have received a copy of the GNU Lesser General Public License # along with Sardana. If not, see <http://www.gnu.org/licenses/>. ## ############################################################################## # Just an hardcode fullname for create an instance of MacroServer. # This macroserver does not need to be defined. Helper for update the sardana recorder manager Helper for test the number of reading recorder paths. The number of reading path sould be len(recorder_path) + 1 # Get the list of recorder path(s) Helper for test getRecorderClasses method of the record Manager. # Use default recorders paths # Add extra recorders paths Helper for test getRecorderClass method of the record Manager. Test whether ordered path precedence is maintained in case of different recorder classes with the same name located in different paths. # set three paths containing recorders with the same class names # retrieve path to the recorder library Test whether ordered path precedence is maintained in case of different recorder classes supporting the same format located in different paths. 
# set three paths containing recorders of the same format # retrieve path to the recorder library Test if external recorders are of higher priority than the built-in) # set three paths containing recorders with the same class names # retrieve path to the recorder library
1.618479
2
experiment/examine_effectiveness_sync.py
i14kwmr/python_source_separation
0
6629769
# Permutation solving and BSS helpers come from the util modules below.
import argparse
import time

import scipy.signal as sp

from util import *
from util_bss import *
from util_sim import *

parser = argparse.ArgumentParser()
# Sampling-rate offset, in ppm.
# BUGFIX: a positional argument only honors `default` when nargs="?" is
# given; without it argparse made the argument mandatory and the default
# was silently ignored.
parser.add_argument("sro", type=float, nargs="?", default=0.0)
args = parser.parse_args()


def examine_effectiveness_sync():
    """Compare NG-ICA, NG-IVA and AuxIVA (IP) blind source separation on a
    simulated two-source convolutive mixture, optionally resampled by
    ``args.sro`` ppm to emulate unsynchronized recording devices.

    Writes the mixture and separated signals to wav files and prints the
    run time and SNR improvement of each method.
    """
    # Initialize the random seed for reproducibility.
    np.random.seed(0)

    # Speech waveforms used to build the convolutive mixture.
    clean_wave_files = [
        "../CMU_ARCTIC/cmu_us_aew_arctic/wav/arctic_a0001.wav",
        "../CMU_ARCTIC/cmu_us_axb_arctic/wav/arctic_a0002.wav",
    ]

    # Number of sources.
    n_sources = len(clean_wave_files)

    # Find the longest file to size the buffer.
    n_samples = 0
    for clean_wave_file in clean_wave_files:
        wav = wave.open(clean_wave_file)
        if n_samples < wav.getnframes():
            n_samples = wav.getnframes()
        wav.close()

    clean_data = np.zeros([n_sources, n_samples])

    # Read the files, normalized to [-1, 1].
    s = 0
    for clean_wave_file in clean_wave_files:
        wav = wave.open(clean_wave_file)
        data = wav.readframes(wav.getnframes())
        data = np.frombuffer(data, dtype=np.int16)
        data = data / np.iinfo(np.int16).max
        clean_data[s, : wav.getnframes()] = data
        wav.close()
        s = s + 1

    sample_rate = 16000  # sampling frequency [Hz]
    N = 1024  # STFT frame size
    Nk = int(N / 2 + 1)  # number of frequency bins

    # Room simulation of the clean sources.
    (
        multi_conv_data,
        multi_conv_data_left_no_noise,
        multi_conv_data_right_no_noise,
    ) = simulate(clean_data, sample_rate)

    # Resample to emulate the sampling-rate offset (sro given in ppm).
    sro = args.sro * 1e-6
    multi_conv_data = resample_signals(multi_conv_data, sample_rate, sro)
    multi_conv_data_left_no_noise = resample_signals(
        multi_conv_data_left_no_noise, sample_rate, sro
    )
    multi_conv_data_right_no_noise = resample_signals(
        multi_conv_data_right_no_noise, sample_rate, sro
    )

    # Write the clean (no-noise) reference signals to files.
    write_file_from_time_signal(
        multi_conv_data_left_no_noise[0, :] * np.iinfo(np.int16).max / 20.0,
        "./ica_left_clean.wav",
        sample_rate,
    )
    write_file_from_time_signal(
        multi_conv_data_right_no_noise[0, :] * np.iinfo(np.int16).max / 20.0,
        "./ica_right_clean.wav",
        sample_rate,
    )

    # Write the convolved microphone inputs to files.
    write_file_from_time_signal(
        multi_conv_data[0, :] * np.iinfo(np.int16).max / 20.0,
        "./ica_in_left.wav",
        sample_rate,
    )
    # BUGFIX: the right-channel file previously wrote channel 0 again,
    # duplicating ica_in_left.wav; write channel 1 instead.
    write_file_from_time_signal(
        multi_conv_data[1, :] * np.iinfo(np.int16).max / 20.0,
        "./ica_in_right.wav",
        sample_rate,
    )

    # Short-time Fourier transform.
    f, t, stft_data = sp.stft(multi_conv_data, fs=sample_rate, window="hann",
                              nperseg=N)

    # Number of ICA iterations.
    n_ica_iterations = 50

    # Initialize the separation filters to identity per frequency bin.
    Wica = np.zeros(shape=(Nk, n_sources, n_sources), dtype=complex)
    Wica = Wica + np.eye(n_sources)[None, ...]
    Wiva = Wica.copy()
    Wiva_ip = Wica.copy()

    start_time = time.time()

    # IVA via the natural-gradient method (only the contrast/score
    # functions differ from plain NG-ICA).
    Wiva, s_iva, cost_buff_iva = execute_natural_gradient_ica(
        stft_data,
        Wiva,
        phi_func=phi_multivariate_laplacian,
        contrast_func=contrast_multivariate_laplacian,
        mu=0.1,
        n_ica_iterations=n_ica_iterations,
        is_use_non_holonomic=False,
    )
    y_iva = projection_back(s_iva, Wiva)
    iva_time = time.time()

    # IVA via the iterative-projection (IP / AuxIVA) method.
    Wiva_ip, s_iva_ip, cost_buff_iva_ip = execute_ip_multivariate_laplacian_iva(
        stft_data, Wiva_ip, n_iterations=n_ica_iterations
    )
    y_iva_ip = projection_back(s_iva_ip, Wiva_ip)
    iva_ip_time = time.time()

    # Plain natural-gradient ICA (needs a permutation solver afterwards).
    Wica, s_ica, cost_buff_ica = execute_natural_gradient_ica(
        stft_data,
        Wica,
        mu=0.1,
        n_ica_iterations=n_ica_iterations,
        is_use_non_holonomic=False,
    )
    permutation_index_result = solver_inter_frequency_permutation(s_ica)
    y_ica = projection_back(s_ica, Wica)

    # Resolve the inter-frequency permutation.
    for k in range(Nk):
        y_ica[:, :, k, :] = y_ica[:, permutation_index_result[k], k, :]

    ica_time = time.time()

    t, y_ica = sp.istft(y_ica[0, ...], fs=sample_rate, window="hann",
                        nperseg=N)
    t, y_iva = sp.istft(y_iva[0, ...], fs=sample_rate, window="hann",
                        nperseg=N)
    t, y_iva_ip = sp.istft(y_iva_ip[0, ...], fs=sample_rate, window="hann",
                           nperseg=N)

    # Input SNR, averaged over both sources; each separated output is
    # matched against both possible source orderings and the best is kept.
    snr_pre = calculate_snr(
        multi_conv_data_left_no_noise[0, ...], multi_conv_data[0, ...]
    ) + calculate_snr(multi_conv_data_right_no_noise[0, ...],
                      multi_conv_data[0, ...])
    snr_pre /= 2.0

    snr_ica_post1 = calculate_snr(
        multi_conv_data_left_no_noise[0, ...], y_ica[0, ...]
    ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_ica[1, ...])
    snr_ica_post2 = calculate_snr(
        multi_conv_data_left_no_noise[0, ...], y_ica[1, ...]
    ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_ica[0, ...])
    snr_ica_post = np.maximum(snr_ica_post1, snr_ica_post2)
    snr_ica_post /= 2.0

    snr_iva_post1 = calculate_snr(
        multi_conv_data_left_no_noise[0, ...], y_iva[0, ...]
    ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_iva[1, ...])
    snr_iva_post2 = calculate_snr(
        multi_conv_data_left_no_noise[0, ...], y_iva[1, ...]
    ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_iva[0, ...])
    snr_iva_post = np.maximum(snr_iva_post1, snr_iva_post2)
    snr_iva_post /= 2.0

    snr_iva_ip_post1 = calculate_snr(
        multi_conv_data_left_no_noise[0, ...], y_iva_ip[0, ...]
    ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_iva_ip[1, ...])
    snr_iva_ip_post2 = calculate_snr(
        multi_conv_data_left_no_noise[0, ...], y_iva_ip[1, ...]
    ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_iva_ip[0, ...])
    snr_iva_ip_post = np.maximum(snr_iva_ip_post1, snr_iva_ip_post2)
    snr_iva_ip_post /= 2.0

    write_file_from_time_signal(
        y_ica[0, ...] * np.iinfo(np.int16).max / 20.0, "./ica_1.wav",
        sample_rate
    )
    write_file_from_time_signal(
        y_ica[1, ...] * np.iinfo(np.int16).max / 20.0, "./ica_2.wav",
        sample_rate
    )

    write_file_from_time_signal(
        y_iva[0, ...] * np.iinfo(np.int16).max / 20.0, "./iva_1.wav",
        sample_rate
    )
    write_file_from_time_signal(
        y_iva[1, ...] * np.iinfo(np.int16).max / 20.0, "./iva_2.wav",
        sample_rate
    )

    write_file_from_time_signal(
        y_iva_ip[0, ...] * np.iinfo(np.int16).max / 20.0, "./iva_ip_1.wav",
        sample_rate
    )
    write_file_from_time_signal(
        y_iva_ip[1, ...] * np.iinfo(np.int16).max / 20.0, "./iva_ip_2.wav",
        sample_rate
    )

    print("method:    ", "NG-ICA", "NG-IVA", "AuxIVA")
    # BUGFIX: the last two timing columns were swapped relative to the
    # header above: NG-IVA runs from start_time to iva_time, AuxIVA (IP)
    # from iva_time to iva_ip_time, NG-ICA from iva_ip_time to ica_time.
    print(
        "処理時間[sec]: {:.2f} {:.2f} {:.2f}".format(
            ica_time - iva_ip_time, iva_time - start_time,
            iva_ip_time - iva_time
        )
    )
    print(
        "Δsnr [dB]: {:.2f} {:.2f} {:.2f}".format(
            snr_ica_post - snr_pre, snr_iva_post - snr_pre,
            snr_iva_ip_post - snr_pre
        )
    )

    # Uncomment to inspect the per-iteration cost values:
    # for t in range(n_ica_iterations):
    #     print(t, cost_buff_ica[t], cost_buff_iva[t], cost_buff_iva_ip[t])


if __name__ == "__main__":
    examine_effectiveness_sync()
# 順列計算に使用 import argparse import time import scipy.signal as sp from util import * from util_bss import * from util_sim import * parser = argparse.ArgumentParser() parser.add_argument("sro", type=float, default=0.0) # 単位はppm args = parser.parse_args() def examine_effectiveness_sync(): # 乱数の種を初期化 np.random.seed(0) # 畳み込みに用いる音声波形 clean_wave_files = [ "../CMU_ARCTIC/cmu_us_aew_arctic/wav/arctic_a0001.wav", "../CMU_ARCTIC/cmu_us_axb_arctic/wav/arctic_a0002.wav", ] # 音源数 n_sources = len(clean_wave_files) # 長さを調べる n_samples = 0 # ファイルを読み込む for clean_wave_file in clean_wave_files: wav = wave.open(clean_wave_file) if n_samples < wav.getnframes(): n_samples = wav.getnframes() wav.close() clean_data = np.zeros([n_sources, n_samples]) # ファイルを読み込む s = 0 for clean_wave_file in clean_wave_files: wav = wave.open(clean_wave_file) data = wav.readframes(wav.getnframes()) data = np.frombuffer(data, dtype=np.int16) data = data / np.iinfo(np.int16).max clean_data[s, : wav.getnframes()] = data wav.close() s = s + 1 sample_rate = 16000 # サンプリング周波数 N = 1024 # フレームサイズ Nk = int(N / 2 + 1) # 周波数の数 # シミュレーションのパラメータ ( multi_conv_data, multi_conv_data_left_no_noise, multi_conv_data_right_no_noise, ) = simulate(clean_data, sample_rate) # リサンプリング sro = args.sro * 1e-6 # 0 multi_conv_data = resample_signals(multi_conv_data, sample_rate, sro) multi_conv_data_left_no_noise = resample_signals( multi_conv_data_left_no_noise, sample_rate, sro ) multi_conv_data_right_no_noise = resample_signals( multi_conv_data_right_no_noise, sample_rate, sro ) # 畳み込んだ波形をファイルに書き込む write_file_from_time_signal( multi_conv_data_left_no_noise[0, :] * np.iinfo(np.int16).max / 20.0, "./ica_left_clean.wav", sample_rate, ) # 畳み込んだ波形をファイルに書き込む write_file_from_time_signal( multi_conv_data_right_no_noise[0, :] * np.iinfo(np.int16).max / 20.0, "./ica_right_clean.wav", sample_rate, ) # 畳み込んだ波形をファイルに書き込む write_file_from_time_signal( multi_conv_data[0, :] * np.iinfo(np.int16).max / 20.0, "./ica_in_left.wav", sample_rate, ) 
write_file_from_time_signal( multi_conv_data[0, :] * np.iinfo(np.int16).max / 20.0, "./ica_in_right.wav", sample_rate, ) # 短時間フーリエ変換を行う f, t, stft_data = sp.stft(multi_conv_data, fs=sample_rate, window="hann", nperseg=N) # ICAの繰り返し回数 n_ica_iterations = 50 # ICAの分離フィルタを初期化 Wica = np.zeros(shape=(Nk, n_sources, n_sources), dtype=complex) Wica = Wica + np.eye(n_sources)[None, ...] Wiva = Wica.copy() Wiva_ip = Wica.copy() start_time = time.time() # 自然勾配法に基づくIVA実行コード(引数に与える関数を変更するだけ) Wiva, s_iva, cost_buff_iva = execute_natural_gradient_ica( stft_data, Wiva, phi_func=phi_multivariate_laplacian, contrast_func=contrast_multivariate_laplacian, mu=0.1, n_ica_iterations=n_ica_iterations, is_use_non_holonomic=False, ) y_iva = projection_back(s_iva, Wiva) iva_time = time.time() # IP法に基づくIVA実行コード(引数に与える関数を変更するだけ) Wiva_ip, s_iva_ip, cost_buff_iva_ip = execute_ip_multivariate_laplacian_iva( stft_data, Wiva_ip, n_iterations=n_ica_iterations ) y_iva_ip = projection_back(s_iva_ip, Wiva_ip) iva_ip_time = time.time() Wica, s_ica, cost_buff_ica = execute_natural_gradient_ica( stft_data, Wica, mu=0.1, n_ica_iterations=n_ica_iterations, is_use_non_holonomic=False, ) permutation_index_result = solver_inter_frequency_permutation(s_ica) y_ica = projection_back(s_ica, Wica) # パーミュテーションを解く for k in range(Nk): y_ica[:, :, k, :] = y_ica[:, permutation_index_result[k], k, :] ica_time = time.time() t, y_ica = sp.istft(y_ica[0, ...], fs=sample_rate, window="hann", nperseg=N) t, y_iva = sp.istft(y_iva[0, ...], fs=sample_rate, window="hann", nperseg=N) t, y_iva_ip = sp.istft(y_iva_ip[0, ...], fs=sample_rate, window="hann", nperseg=N) snr_pre = calculate_snr( multi_conv_data_left_no_noise[0, ...], multi_conv_data[0, ...] ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], multi_conv_data[0, ...]) snr_pre /= 2.0 snr_ica_post1 = calculate_snr( multi_conv_data_left_no_noise[0, ...], y_ica[0, ...] 
) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_ica[1, ...]) snr_ica_post2 = calculate_snr( multi_conv_data_left_no_noise[0, ...], y_ica[1, ...] ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_ica[0, ...]) snr_ica_post = np.maximum(snr_ica_post1, snr_ica_post2) snr_ica_post /= 2.0 snr_iva_post1 = calculate_snr( multi_conv_data_left_no_noise[0, ...], y_iva[0, ...] ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_iva[1, ...]) snr_iva_post2 = calculate_snr( multi_conv_data_left_no_noise[0, ...], y_iva[1, ...] ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_iva[0, ...]) snr_iva_post = np.maximum(snr_iva_post1, snr_iva_post2) snr_iva_post /= 2.0 snr_iva_ip_post1 = calculate_snr( multi_conv_data_left_no_noise[0, ...], y_iva_ip[0, ...] ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_iva_ip[1, ...]) snr_iva_ip_post2 = calculate_snr( multi_conv_data_left_no_noise[0, ...], y_iva_ip[1, ...] ) + calculate_snr(multi_conv_data_right_no_noise[0, ...], y_iva_ip[0, ...]) snr_iva_ip_post = np.maximum(snr_iva_ip_post1, snr_iva_ip_post2) snr_iva_ip_post /= 2.0 write_file_from_time_signal( y_ica[0, ...] * np.iinfo(np.int16).max / 20.0, "./ica_1.wav", sample_rate ) write_file_from_time_signal( y_ica[1, ...] * np.iinfo(np.int16).max / 20.0, "./ica_2.wav", sample_rate ) write_file_from_time_signal( y_iva[0, ...] * np.iinfo(np.int16).max / 20.0, "./iva_1.wav", sample_rate ) write_file_from_time_signal( y_iva[1, ...] * np.iinfo(np.int16).max / 20.0, "./iva_2.wav", sample_rate ) write_file_from_time_signal( y_iva_ip[0, ...] * np.iinfo(np.int16).max / 20.0, "./iva_ip_1.wav", sample_rate ) write_file_from_time_signal( y_iva_ip[1, ...] 
* np.iinfo(np.int16).max / 20.0, "./iva_ip_2.wav", sample_rate ) print("method: ", "NG-ICA", "NG-IVA", "AuxIVA") print( "処理時間[sec]: {:.2f} {:.2f} {:.2f}".format( ica_time - iva_ip_time, iva_ip_time - iva_time, iva_time - start_time ) ) print( "Δsnr [dB]: {:.2f} {:.2f} {:.2f}".format( snr_ica_post - snr_pre, snr_iva_post - snr_pre, snr_iva_ip_post - snr_pre ) ) # コストの値を表示 # for t in range(n_ica_iterations): # print(t,cost_buff_ica[t],cost_buff_iva[t],cost_buff_iva_ip[t]) if __name__ == "__main__": examine_effectiveness_sync()
ja
0.998375
# 順列計算に使用 # 単位はppm # 乱数の種を初期化 # 畳み込みに用いる音声波形 # 音源数 # 長さを調べる # ファイルを読み込む # ファイルを読み込む # サンプリング周波数 # フレームサイズ # 周波数の数 # シミュレーションのパラメータ # リサンプリング # 0 # 畳み込んだ波形をファイルに書き込む # 畳み込んだ波形をファイルに書き込む # 畳み込んだ波形をファイルに書き込む # 短時間フーリエ変換を行う # ICAの繰り返し回数 # ICAの分離フィルタを初期化 # 自然勾配法に基づくIVA実行コード(引数に与える関数を変更するだけ) # IP法に基づくIVA実行コード(引数に与える関数を変更するだけ) # パーミュテーションを解く # コストの値を表示 # for t in range(n_ica_iterations): # print(t,cost_buff_ica[t],cost_buff_iva[t],cost_buff_iva_ip[t])
2.350712
2
python_service/singleton/mynewsite/python_book/models.py
Mishco/Gof_patterns
0
6629770
from django.db import models
from django.contrib.auth.models import User


class Task(models.Model):
    """A single to-do item owned by a user, with a workflow status."""

    # (stored value, human-readable label) pairs for the status field.
    TASK_STATUS = (
        ('on_hold', 'On Hold'),
        ('complete', 'Complete'),
        ('in_progress', 'In Progress'),
        ('to_do', 'To do'),
    )

    task = models.CharField(max_length=250)
    # BUGFIX: on_delete is mandatory since Django 2.0; CASCADE matches the
    # implicit default of older Django versions, so tasks are deleted with
    # their owner. related_name kept as-is: callers use user.ToDoList.
    author = models.ForeignKey(User, on_delete=models.CASCADE,
                               related_name='ToDoList')
    body = models.TextField()
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated = models.DateTimeField(auto_now=True)      # refreshed on save
    status = models.CharField(max_length=20, choices=TASK_STATUS,
                              default='to_do')

    class Meta:
        # Newest tasks first.
        ordering = ('-created',)

    def __str__(self):
        return self.task
<filename>python_service/singleton/mynewsite/python_book/models.py from django.db import models from django.contrib.auth.models import User # Create your models here. class Task(models.Model): TASK_STATUS = ( ('on_hold', 'On Hold'), ('complete', 'Complete'), ('in_progress', 'In Progress'), ('to_do', 'To do'), ) task = models.CharField(max_length=250) author = models.ForeignKey(User, related_name='ToDoList') body = models.TextField() created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) status = models.CharField(max_length=20, choices=TASK_STATUS, default='to_do') class Meta: ordering = ('-created',) def __str__(self): return self.task
en
0.963489
# Create your models here.
2.364683
2
init_db.py
Aspect13/shared
0
6629771
from .db_manager import Base, engine


def init_db():
    """Create all database tables registered on the shared declarative Base.

    Idempotent: ``create_all`` only creates tables that do not yet exist.
    """
    # Imported for its side effect only: importing .models.vault registers
    # its model classes on Base.metadata so create_all() can see them.
    # Do not remove even though the name appears unused.  # noqa: F401
    from .models import vault
    Base.metadata.create_all(bind=engine)
none
1
1.417576
1
Lib/site-packages/poyo/_nodes.py
hirorin-demon/hirorin-streamlit
144
6629772
# -*- coding: utf-8 -*-
"""Tree node types used to represent a parsed YAML config.

``Root`` holds the top level, ``Section`` represents a mapping that can both
contain children and be contained, and ``Simple`` is a leaf key/value pair.
Calling a container returns its plain ``dict`` representation.
"""


class TreeElement(object):
    """Marker base class identifying the package's internal node types."""

    def __init__(self, **kwargs):
        pass


class ContainerMixin(object):
    """Mixin for nodes that hold child ``TreeElement`` instances.

    Calling a container yields a dict mapping each child's name to the
    child's own call result.
    """

    def __init__(self, **kwargs):
        self._children = []
        super(ContainerMixin, self).__init__(**kwargs)

    def __iter__(self):
        # Yield children in insertion order.
        return iter(self._children)

    def __call__(self):
        return dict((child.name, child()) for child in self._children)

    def add_child(self, child):
        """Attach *child* to this container and record it as the parent.

        Raises TypeError unless *child* is a ChildMixin instance.
        """
        if isinstance(child, ChildMixin):
            child.parent = self
            self._children.append(child)
        else:
            raise TypeError(
                "Requires instance of TreeElement. "
                "Got {}".format(type(child))
            )


class ChildMixin(object):
    """Mixin for nodes that attach themselves to a Container on creation."""

    def __init__(self, **kwargs):
        container = kwargs["parent"]
        if not isinstance(container, ContainerMixin):
            raise ValueError(
                "Parent of ChildMixin instance needs to be a Container."
            )
        container.add_child(self)
        super(ChildMixin, self).__init__(**kwargs)


class Root(ContainerMixin, TreeElement):
    """Pure Container class to represent the root of a YAML config."""

    def __init__(self, **kwargs):
        super(Root, self).__init__(**kwargs)
        self.level = -1  # sentinel: shallower than any real section


class Section(ContainerMixin, ChildMixin, TreeElement):
    """A named mapping node: acts as a Child and as a Container."""

    def __init__(self, name, level, **kwargs):
        super(Section, self).__init__(**kwargs)
        self.name = name
        self.level = level

    def __repr__(self):
        return u"<Section name: {name}>".format(name=self.name)


class Simple(ChildMixin, TreeElement):
    """A leaf node: a plain key/value pair inside a config."""

    def __init__(self, name, level, value, **kwargs):
        super(Simple, self).__init__(**kwargs)
        self.name = name
        self.level = level
        self.value = value

    def __call__(self):
        # A leaf's dict representation is just its value.
        return self.value

    def __repr__(self):
        return u"<Simple name: {name}, value: {value}>".format(
            name=self.name, value=self.value
        )
en
0.897765
# -*- coding: utf-8 -*- Helper class to identify internal classes. Mixin that can hold TreeElement instances. Containers can be called to return a dict representation. If the given object is an instance of Child add it to self and register self as a parent. Mixin that can be attached to Container object. Pure Container class to represent the root of a YAML config. Class that can act as a Child, but also as a Container. Class that can solely be used as a Child, f.i. simple key value pairs in a config.
2.584162
3
django_eventstream/eventstream.py
Vutivi/group-jabber
0
6629773
import copy

from .storage import EventDoesNotExist
from .eventresponse import EventResponse
from .utils import make_id, publish_event, publish_kick, \
    get_storage, get_channelmanager, have_channels


class EventPermissionError(Exception):
    """Raised when a user lacks read permission on one or more channels."""

    def __init__(self, message, channels=None):
        # BUGFIX: the original called super(Exception, self).__init__,
        # which skips Exception itself in the MRO; also replace the
        # mutable default argument `channels=[]`.
        super(EventPermissionError, self).__init__(message)
        # Deep-copied so later mutation by the caller cannot change the
        # exception's record of the denied channels.
        self.channels = copy.deepcopy(channels) if channels is not None else []


def send_event(channel, event_type, data, skip_user_ids=None):
    """Publish an event on *channel*.

    For reliable channels with storage configured the event is appended to
    storage first so it carries persistent ids; otherwise a transient Event
    is built.  The event is then delivered to local listeners (if channels
    support is available) and published through the GRIP proxy, skipping
    the users in *skip_user_ids*.
    """
    from .event import Event

    # Avoid a shared mutable default argument.
    if skip_user_ids is None:
        skip_user_ids = []

    storage = get_storage()
    channelmanager = get_channelmanager()

    if channelmanager.is_channel_reliable(channel) and storage:
        e = storage.append_event(channel, event_type, data)
        pub_id = str(e.id)
        pub_prev_id = str(e.id - 1)
    else:
        e = Event(channel, event_type, data)
        pub_id = None
        pub_prev_id = None

    if have_channels():
        from .consumers import get_listener_manager

        # send to local listeners
        get_listener_manager().add_to_queues(channel, e)

    # publish through grip proxy
    publish_event(
        channel, event_type, data, pub_id, pub_prev_id,
        skip_user_ids=skip_user_ids)


def get_events(request, limit=100, user=None):
    """Build an EventResponse with up to *limit* events for the channels in
    *request*, on behalf of *user*.

    The limit is split evenly across channels (at least 1 each).  Raises
    EventPermissionError if the user may not read any requested channel.
    For reliable channels, events after each channel's last-seen id are
    fetched; a stale last id marks the channel as reset.
    """
    resp = EventResponse()
    resp.is_next = request.is_next
    resp.is_recover = request.is_recover
    resp.user = user

    if len(request.channels) == 0:
        return resp

    limit_per_type = int(limit / len(request.channels))
    if limit_per_type < 1:
        limit_per_type = 1

    storage = get_storage()
    channelmanager = get_channelmanager()

    # Check permissions for all channels up front so the error lists every
    # inaccessible channel at once.
    inaccessible_channels = []
    for channel in request.channels:
        if not channelmanager.can_read_channel(user, channel):
            inaccessible_channels.append(channel)

    if len(inaccessible_channels) > 0:
        msg = 'Permission denied to channels: %s' % (
            ', '.join(inaccessible_channels))
        raise EventPermissionError(msg, channels=inaccessible_channels)

    for channel in request.channels:
        reset = False
        last_id = request.channel_last_ids.get(channel)
        more = False
        if channelmanager.is_channel_reliable(channel) and storage:
            if last_id is not None:
                try:
                    # Fetch one extra event to detect whether more remain.
                    events = storage.get_events(
                        channel, int(last_id), limit=limit_per_type + 1)
                    if len(events) >= limit_per_type + 1:
                        events = events[:limit_per_type]
                        more = True
                except EventDoesNotExist as e:
                    # last_id is no longer in storage: tell the client to
                    # reset from the current position.
                    reset = True
                    events = []
                    last_id = str(e.current_id)
            else:
                events = []
                last_id = str(storage.get_current_id(channel))
        else:
            # Unreliable channel: no history, no resumable position.
            events = []
            last_id = None

        resp.channel_items[channel] = events
        if last_id is not None:
            resp.channel_last_ids[channel] = last_id
        if reset:
            resp.channel_reset.add(channel)
        if more:
            resp.channel_more.add(channel)

    return resp


def get_current_event_id(channels):
    """Return a combined id encoding the current storage position of each
    of the given channels."""
    storage = get_storage()
    cur_ids = {}
    for channel in channels:
        cur_ids[channel] = str(storage.get_current_id(channel))
    return make_id(cur_ids)


def channel_permission_changed(user, channel):
    """Disconnect *user* from *channel* if they can no longer read it.

    Kicks both local listeners and connections held by the GRIP proxy.
    """
    channelmanager = get_channelmanager()
    if not channelmanager.can_read_channel(user, channel):
        user_id = user.id if user else 'anonymous'

        if have_channels():
            from .consumers import get_listener_manager

            # kick local listeners
            get_listener_manager().kick(user_id, channel)

        # kick users connected to grip proxy
        publish_kick(user_id, channel)
import copy from .storage import EventDoesNotExist from .eventresponse import EventResponse from .utils import make_id, publish_event, publish_kick, \ get_storage, get_channelmanager, have_channels class EventPermissionError(Exception): def __init__(self, message, channels=[]): super(Exception, self).__init__(message) self.channels = copy.deepcopy(channels) def send_event(channel, event_type, data, skip_user_ids=[]): from .event import Event storage = get_storage() channelmanager = get_channelmanager() if channelmanager.is_channel_reliable(channel) and storage: e = storage.append_event(channel, event_type, data) pub_id = str(e.id) pub_prev_id = str(e.id - 1) else: e = Event(channel, event_type, data) pub_id = None pub_prev_id = None if have_channels(): from .consumers import get_listener_manager # send to local listeners get_listener_manager().add_to_queues(channel, e) # publish through grip proxy publish_event( channel, event_type, data, pub_id, pub_prev_id, skip_user_ids=skip_user_ids) def get_events(request, limit=100, user=None): resp = EventResponse() resp.is_next = request.is_next resp.is_recover = request.is_recover resp.user = user if len(request.channels) == 0: return resp limit_per_type = int(limit / len(request.channels)) if limit_per_type < 1: limit_per_type = 1 storage = get_storage() channelmanager = get_channelmanager() inaccessible_channels = [] for channel in request.channels: if not channelmanager.can_read_channel(user, channel): inaccessible_channels.append(channel) if len(inaccessible_channels) > 0: msg = 'Permission denied to channels: %s' % ( ', '.join(inaccessible_channels)) raise EventPermissionError(msg, channels=inaccessible_channels) for channel in request.channels: reset = False last_id = request.channel_last_ids.get(channel) more = False if channelmanager.is_channel_reliable(channel) and storage: if last_id is not None: try: events = storage.get_events( channel, int(last_id), limit=limit_per_type + 1) if len(events) >= limit_per_type + 
1: events = events[:limit_per_type] more = True except EventDoesNotExist as e: reset = True events = [] last_id = str(e.current_id) else: events = [] last_id = str(storage.get_current_id(channel)) else: events = [] last_id = None resp.channel_items[channel] = events if last_id is not None: resp.channel_last_ids[channel] = last_id if reset: resp.channel_reset.add(channel) if more: resp.channel_more.add(channel) return resp def get_current_event_id(channels): storage = get_storage() cur_ids = {} for channel in channels: cur_ids[channel] = str(storage.get_current_id(channel)) return make_id(cur_ids) def channel_permission_changed(user, channel): channelmanager = get_channelmanager() if not channelmanager.can_read_channel(user, channel): user_id = user.id if user else 'anonymous' if have_channels(): from .consumers import get_listener_manager # kick local listeners get_listener_manager().kick(user_id, channel) # kick users connected to grip proxy publish_kick(user_id, channel)
en
0.708657
# send to local listeners # publish through grip proxy # kick local listeners # kick users connected to grip proxy
1.969462
2
mocks_factory/Cats2healpixmaps.py
mehdirezaie/SYSNet
6
6629774
''' code to read the cut sky mocks and make them like a healpix map mpirun -np 2 python Cats2healpixmaps.py --path /Volumes/TimeMachine/data/mocks/dr5mocks/ --ext seed*/3dbox_nmesh1024_L5274.0_bias1.5_seed*.cat --nside 256 --zlim 0.7 1.2 ''' import healpy as hp import numpy as np def hpixsum(nside, ra, dec, value=None, nest=False): ''' cc: Imaginglss Ellie and Yu make a healpix map from ra-dec hpixsum(nside, ra, dec, value=None, nest=False) ''' pix = hp.ang2pix(nside, np.radians(90 - dec), np.radians(ra), nest=nest) npix = hp.nside2npix(nside) w = np.bincount(pix, weights=value, minlength=npix) return w def read_write(path2file, path2output, nside, zlim=None): data = np.loadtxt(path2file) m = np.ones(data[:,2].size, dtype='?') if zlim is not None: m &= (data[:,2]>=zlim[0]) & (data[:,2]< zlim[1]) galmap = hpixsum(nside, data[m,0], data[m,1]) hp.write_map(path2output, galmap, fits_IDL=False, dtype=np.float64, overwrite=True) del data def loop_filenames(filenames, nside, zlim): for file in filenames: #fn = file.split('/')[-1] #inputf = file + '/' + fn + '.cat' #outputf = file + '/' + fn + 'hp'+ str(nside) + 'v2.fits' inputf = file outputf = file[:-4] + 'hp'+str(nside)+'v0.fits' # v2 is with zcut read_write(inputf, outputf, nside, zlim) # mpi from mpi4py import MPI # get the size, and the rank of each mpi task comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() if rank == 0: from glob import glob from argparse import ArgumentParser ap = ArgumentParser(description='Read BigFile mocks and write .dat') ap.add_argument('--path', default='/global/cscratch1/sd/mehdi/mocks/3dbox/') ap.add_argument('--ext', default='*') ap.add_argument('--nside', type=int, default=256) ap.add_argument('--zlim', type=float, nargs='*', default=None) ns = ap.parse_args() #FILES = glob(ns.path+ns.ext) # already done for 1-100 FILES = [ns.path+'seed'+str(i)+'/3dbox_nmesh1024_L5274.0_bias1.5_seed'+str(i)+'.cat' for i in range(1, 201)] NSIDE = ns.nside zlim = ns.zlim log = '# --- 
create healpix maps with nside of {} ----\n'.format(NSIDE) log += '# PATH : {} nfiles : {} files : *{} \n'.format(ns.path, len(FILES), ns.ext) log += '# zlim : {}'.format(zlim) print(log) else: FILES = None NSIDE = None zlim = None # bcast FILES FILES = comm.bcast(FILES, root=0) NSIDE = comm.bcast(NSIDE, root=0) zlim = comm.bcast(zlim, root=0) # # distribute files on different task ids # chunksize nfiles = len(FILES) if np.mod(nfiles, size) == 0: chunksize = nfiles // size else: chunksize = nfiles // size + 1 my_i = rank*chunksize if rank*chunksize + chunksize > nfiles: my_end = None else: my_end = rank*chunksize + chunksize my_chunk = FILES[my_i:my_end] print('files on rank {} are {}'.format(rank, my_chunk)) # for filei in my_chunk: # print(filei.split('/')[-1]) # # read BigFile and write in a .dat file # loop_filenames(my_chunk, NSIDE, zlim)
''' code to read the cut sky mocks and make them like a healpix map mpirun -np 2 python Cats2healpixmaps.py --path /Volumes/TimeMachine/data/mocks/dr5mocks/ --ext seed*/3dbox_nmesh1024_L5274.0_bias1.5_seed*.cat --nside 256 --zlim 0.7 1.2 ''' import healpy as hp import numpy as np def hpixsum(nside, ra, dec, value=None, nest=False): ''' cc: Imaginglss Ellie and Yu make a healpix map from ra-dec hpixsum(nside, ra, dec, value=None, nest=False) ''' pix = hp.ang2pix(nside, np.radians(90 - dec), np.radians(ra), nest=nest) npix = hp.nside2npix(nside) w = np.bincount(pix, weights=value, minlength=npix) return w def read_write(path2file, path2output, nside, zlim=None): data = np.loadtxt(path2file) m = np.ones(data[:,2].size, dtype='?') if zlim is not None: m &= (data[:,2]>=zlim[0]) & (data[:,2]< zlim[1]) galmap = hpixsum(nside, data[m,0], data[m,1]) hp.write_map(path2output, galmap, fits_IDL=False, dtype=np.float64, overwrite=True) del data def loop_filenames(filenames, nside, zlim): for file in filenames: #fn = file.split('/')[-1] #inputf = file + '/' + fn + '.cat' #outputf = file + '/' + fn + 'hp'+ str(nside) + 'v2.fits' inputf = file outputf = file[:-4] + 'hp'+str(nside)+'v0.fits' # v2 is with zcut read_write(inputf, outputf, nside, zlim) # mpi from mpi4py import MPI # get the size, and the rank of each mpi task comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() if rank == 0: from glob import glob from argparse import ArgumentParser ap = ArgumentParser(description='Read BigFile mocks and write .dat') ap.add_argument('--path', default='/global/cscratch1/sd/mehdi/mocks/3dbox/') ap.add_argument('--ext', default='*') ap.add_argument('--nside', type=int, default=256) ap.add_argument('--zlim', type=float, nargs='*', default=None) ns = ap.parse_args() #FILES = glob(ns.path+ns.ext) # already done for 1-100 FILES = [ns.path+'seed'+str(i)+'/3dbox_nmesh1024_L5274.0_bias1.5_seed'+str(i)+'.cat' for i in range(1, 201)] NSIDE = ns.nside zlim = ns.zlim log = '# --- 
create healpix maps with nside of {} ----\n'.format(NSIDE) log += '# PATH : {} nfiles : {} files : *{} \n'.format(ns.path, len(FILES), ns.ext) log += '# zlim : {}'.format(zlim) print(log) else: FILES = None NSIDE = None zlim = None # bcast FILES FILES = comm.bcast(FILES, root=0) NSIDE = comm.bcast(NSIDE, root=0) zlim = comm.bcast(zlim, root=0) # # distribute files on different task ids # chunksize nfiles = len(FILES) if np.mod(nfiles, size) == 0: chunksize = nfiles // size else: chunksize = nfiles // size + 1 my_i = rank*chunksize if rank*chunksize + chunksize > nfiles: my_end = None else: my_end = rank*chunksize + chunksize my_chunk = FILES[my_i:my_end] print('files on rank {} are {}'.format(rank, my_chunk)) # for filei in my_chunk: # print(filei.split('/')[-1]) # # read BigFile and write in a .dat file # loop_filenames(my_chunk, NSIDE, zlim)
en
0.567667
code to read the cut sky mocks and make them like a healpix map mpirun -np 2 python Cats2healpixmaps.py --path /Volumes/TimeMachine/data/mocks/dr5mocks/ --ext seed*/3dbox_nmesh1024_L5274.0_bias1.5_seed*.cat --nside 256 --zlim 0.7 1.2 cc: Imaginglss Ellie and Yu make a healpix map from ra-dec hpixsum(nside, ra, dec, value=None, nest=False) #fn = file.split('/')[-1] #inputf = file + '/' + fn + '.cat' #outputf = file + '/' + fn + 'hp'+ str(nside) + 'v2.fits' # v2 is with zcut # mpi # get the size, and the rank of each mpi task #FILES = glob(ns.path+ns.ext) # already done for 1-100 # bcast FILES # # distribute files on different task ids # chunksize # for filei in my_chunk: # print(filei.split('/')[-1]) # # read BigFile and write in a .dat file #
2.03348
2
open_seq2seq/data/speech2text/speech_utils.py
borisgin/OpenSeq2Seq
1
6629775
# Copyright (c) 2018 NVIDIA Corporation from __future__ import absolute_import, division, print_function from __future__ import unicode_literals import math import os import h5py import numpy as np import resampy as rs import scipy.io.wavfile as wave BACKENDS = [] try: import python_speech_features as psf BACKENDS.append('psf') except ImportError: pass try: import librosa BACKENDS.append('librosa') except ImportError: pass WINDOWS_FNS = {"hanning": np.hanning, "hamming": np.hamming, "none": None} class PreprocessOnTheFlyException(Exception): """ Exception that is thrown to not load preprocessed features from disk; recompute on-the-fly. This saves disk space (if you're experimenting with data input formats/preprocessing) but can be slower. The slowdown is especially apparent for small, fast NNs.""" pass class RegenerateCacheException(Exception): """ Exception that is thrown to force recomputation of (preprocessed) features """ pass def load_features(path, data_format): """ Function to load (preprocessed) features from disk Args: :param path: the path where the features are stored :param data_format: the format in which the features are stored :return: tuple of (features, duration) """ if data_format == 'hdf5': with h5py.File(path + '.hdf5', "r") as hf5_file: features = hf5_file["features"][:] duration = hf5_file["features"].attrs["duration"] elif data_format == 'npy': features, duration = np.load(path + '.npy') elif data_format == 'npz': data = np.load(path + '.npz') features = data['features'] duration = data['duration'] else: raise ValueError("Invalid data format for caching: ", data_format, "!\n", "options: hdf5, npy, npz") return features, duration def save_features(features, duration, path, data_format, verbose=False): """ Function to save (preprocessed) features to disk Args: :param features: features :param duration: metadata: duration in seconds of audio file :param path: path to store the data :param data_format: format to store the data in ('npy', 'npz', 
'hdf5') """ if verbose: print("Saving to: ", path) if data_format == 'hdf5': with h5py.File(path + '.hdf5', "w") as hf5_file: dset = hf5_file.create_dataset("features", data=features) dset.attrs["duration"] = duration elif data_format == 'npy': np.save(path + '.npy', [features, duration]) elif data_format == 'npz': np.savez(path + '.npz', features=features, duration=duration) else: raise ValueError("Invalid data format for caching: ", data_format, "!\n", "options: hdf5, npy, npz") def get_preprocessed_data_path(filename, params): """ Function to convert the audio path into the path to the preprocessed version of this audio Args: :param filename: WAVE filename :param params: dictionary containing preprocessing parameters :return: path to new file (without extension). The path is generated from the relevant preprocessing parameters. """ if isinstance(filename, bytes): # convert binary string to normal string filename = filename.decode('ascii') filename = os.path.realpath(filename) # decode symbolic links ## filter relevant parameters # TODO is there a cleaner way of doing this? # print(list(params.keys())) ignored_params = ["cache_features", "cache_format", "cache_regenerate", "vocab_file", "dataset_files", "shuffle", "batch_size", "max_duration", "mode", "interactive", "autoregressive", "char2idx", "tgt_vocab_size", "idx2char", "dtype"] def fix_kv(text): """ Helper function to shorten length of filenames to get around filesystem path length limitations""" text = str(text) text = text.replace("speed_perturbation_ratio", "sp") \ .replace("noise_level_min", "nlmin", ) \ .replace("noise_level_max", "nlmax") \ .replace("add_derivatives", "d") \ .replace("add_second_derivatives", "dd") return text # generate the identifier by simply concatenating preprocessing key-value # pairs as strings. 
preprocess_id = "-".join( [fix_kv(k) + "_" + fix_kv(v) for k, v in params.items() if k not in ignored_params]) preprocessed_dir = os.path.dirname(filename).replace("wav", "preprocessed-" + preprocess_id) preprocessed_path = os.path.join(preprocessed_dir, os.path.basename(filename).replace(".wav", "")) # create dir if it doesn't exist yet if not os.path.exists(preprocessed_dir): os.makedirs(preprocessed_dir) return preprocessed_path def get_speech_features_from_file(filename, params): """Function to get a numpy array of features, from an audio file. if params['cache_features']==True, try load preprocessed data from disk, or store after preprocesseng. else, perform preprocessing on-the-fly. Args: filename (string): WAVE filename. params (dict): the following parameters num_features (int): number of speech features in frequency domain. features_type (string): 'mfcc' or 'spectrogram'. window_size (float): size of analysis window in milli-seconds. window_stride (float): stride of analysis window in milli-seconds. augmentation (dict, optional): dictionary of augmentation parameters. See :func:`augment_audio_signal` for specification and example. window (str): window function to apply dither (float): weight of Gaussian noise to apply to input signal for dithering/preventing quantization noise num_fft (int): size of fft window to use if features require fft, defaults to smallest power of 2 larger than window size norm_per_feature (bool): if True, the output features will be normalized (whitened) individually. if False, a global mean/std over all features will be used for normalization Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. 
""" cache_features = params.get('cache_features', False) cache_format = params.get('cache_format', 'hdf5') cache_regenerate = params.get('cache_regenerate', False) try: if not cache_features: raise PreprocessOnTheFlyException( "on-the-fly preprocessing enforced with 'cache_features'==True") if cache_regenerate: raise RegenerateCacheException("regenerating cache...") preprocessed_data_path = get_preprocessed_data_path(filename, params) features, duration = load_features(preprocessed_data_path, data_format=cache_format) except PreprocessOnTheFlyException: sample_freq, signal = wave.read(filename) features, duration = get_speech_features(signal, sample_freq, params) except (OSError, FileNotFoundError, RegenerateCacheException): sample_freq, signal = wave.read(filename) features, duration = get_speech_features(signal, sample_freq, params) preprocessed_data_path = get_preprocessed_data_path(filename, params) save_features(features, duration, preprocessed_data_path, data_format=cache_format) return features, duration def normalize_signal(signal): """ Normalize float32 signal to [-1, 1] range """ return signal / (np.max(np.abs(signal)) + 1e-5) def augment_audio_signal(signal, sample_freq, augmentation): """Function that performs audio signal augmentation. Args: signal (np.array): np.array containing raw audio signal. sample_freq (float): frames per second. augmentation (dict, optional): None or dictionary of augmentation parameters. If not None, has to have 'speed_perturbation_ratio', 'noise_level_min', or 'noise_level_max' fields, e.g.:: augmentation={ 'speed_perturbation_ratio': 0.2, 'noise_level_min': -90, 'noise_level_max': -46, } 'speed_perturbation_ratio' can either be a list of possible speed perturbation factors or a float. If float, a random value from U[1-speed_perturbation_ratio, 1+speed_perturbation_ratio]. Returns: np.array: np.array with augmented audio signal. 
""" signal_float = normalize_signal(signal.astype(np.float32)) if 'speed_perturbation_ratio' in augmentation: stretch_amount = -1 if isinstance(augmentation['speed_perturbation_ratio'], list): stretch_amount = np.random.choice(augmentation['speed_perturbation_ratio']) elif augmentation['speed_perturbation_ratio'] > 0: # time stretch (might be slow) stretch_amount = 1.0 + (2.0 * np.random.rand() - 1.0) * \ augmentation['speed_perturbation_ratio'] if stretch_amount > 0: signal_float = rs.resample( signal_float, sample_freq, int(sample_freq * stretch_amount), filter='kaiser_best', ) # noise if 'noise_level_min' in augmentation and 'noise_level_max' in augmentation: noise_level_db = np.random.randint(low=augmentation['noise_level_min'], high=augmentation['noise_level_max']) signal_float += np.random.randn(signal_float.shape[0]) * \ 10.0 ** (noise_level_db / 20.0) return normalize_signal(signal_float) def preemphasis(signal, coeff=0.97): return np.append(signal[0], signal[1:] - coeff * signal[:-1]) def get_speech_features(signal, sample_freq, params): """ Get speech features using either librosa (recommended) or python_speech_features Args: signal (np.array): np.array containing raw audio signal sample_freq (float): sample rate of the signal params (dict): parameters of pre-processing Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. 
audio_duration (float): duration of the signal in seconds """ backend = params.get('backend', 'psf') features_type = params.get('input_type', 'spectrogram') num_features = params['num_audio_features'] window_size = params.get('window_size', 20e-3) window_stride = params.get('window_stride', 10e-3) augmentation = params.get('augmentation', None) if backend == 'librosa': window_fn = WINDOWS_FNS[params.get('window', "hanning")] dither = params.get('dither', 0.0) num_fft = params.get('num_fft', None) norm_per_feature = params.get('norm_per_feature', False) mel_basis = params.get('mel_basis', None) if mel_basis is not None and sample_freq != params["sample_freq"]: raise ValueError( ("The sampling frequency set in params {} does not match the " "frequency {} read from file {}").format(params["sample_freq"], sample_freq, filename) ) features, duration = get_speech_features_librosa( signal, sample_freq, num_features, features_type, window_size, window_stride, augmentation, window_fn=window_fn, dither=dither, norm_per_feature=norm_per_feature, num_fft=num_fft, mel_basis=mel_basis ) else: pad_to = params.get('pad_to', 8) features, duration = get_speech_features_psf( signal, sample_freq, num_features, pad_to, features_type, window_size, window_stride, augmentation ) return features, duration def get_speech_features_librosa(signal, sample_freq, num_features, features_type='spectrogram', window_size=20e-3, window_stride=10e-3, augmentation=None, window_fn=np.hanning, num_fft=None, dither=0.0, norm_per_feature=False, mel_basis=None): """Function to convert raw audio signal to numpy array of features. Backend: librosa Args: signal (np.array): np.array containing raw audio signal. sample_freq (float): frames per second. num_features (int): number of speech features in frequency domain. pad_to (int): if specified, the length will be padded to become divisible by ``pad_to`` parameter. features_type (string): 'mfcc' or 'spectrogram'. 
window_size (float): size of analysis window in milli-seconds. window_stride (float): stride of analysis window in milli-seconds. augmentation (dict, optional): dictionary of augmentation parameters. See :func:`augment_audio_signal` for specification and example. Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. audio_duration (float): duration of the signal in seconds """ if augmentation: signal = augment_audio_signal(signal.astype(np.float32), sample_freq, augmentation) else: signal = normalize_signal(signal.astype(np.float32)) audio_duration = len(signal) * 1.0 / sample_freq n_window_size = int(sample_freq * window_size) n_window_stride = int(sample_freq * window_stride) num_fft = num_fft or 2**math.ceil(math.log2(window_size*sample_freq)) if dither > 0: signal += dither*np.random.randn(*signal.shape) if features_type == 'spectrogram': # ignore 1/n_fft multiplier, since there is a post-normalization powspec = np.square(np.abs(librosa.core.stft( signal, n_fft=n_window_size, hop_length=n_window_stride, win_length=n_window_size, center=True, window=window_fn))) # remove small bins powspec[powspec <= 1e-30] = 1e-30 features = 10 * np.log10(powspec.T) assert num_features <= n_window_size // 2 + 1, \ "num_features for spectrogram should be <= (sample_freq * window_size // 2 + 1)" # cut high frequency part features = features[:, :num_features] elif features_type == 'mfcc': signal = preemphasis(signal, coeff=0.97) S = np.square( np.abs( librosa.core.stft(signal, n_fft=num_fft, hop_length=int(window_stride * sample_freq), win_length=int(window_size * sample_freq), center=True, window=window_fn ) ) ) features = librosa.feature.mfcc(sr=sample_freq, S=S, n_mfcc=num_features, n_mels=2*num_features).T elif features_type == 'logfbank': signal = preemphasis(signal,coeff=0.97) S = np.abs(librosa.core.stft(signal, n_fft=num_fft, hop_length=int(window_stride * sample_freq), win_length=int(window_size * sample_freq), center=True, 
window=window_fn))**2.0 if mel_basis is None: # Build a Mel filter mel_basis = librosa.filters.mel(sample_freq, num_fft, n_mels=num_features, fmin=0, fmax=int(sample_freq/2)) features = np.log(np.dot(mel_basis, S) + 1e-20).T else: raise ValueError('Unknown features type: {}'.format(features_type)) norm_axis = 0 if norm_per_feature else None mean = np.mean(features, axis=norm_axis) std_dev = np.std(features, axis=norm_axis) features = (features - mean) / std_dev # now it is safe to pad # if pad_to > 0: # if features.shape[0] % pad_to != 0: # pad_size = pad_to - features.shape[0] % pad_to # if pad_size != 0: # features = np.pad(features, ((0,pad_size), (0,0)), mode='constant') return features, audio_duration def get_speech_features_psf(signal, sample_freq, num_features, pad_to=8, features_type='spectrogram', window_size=20e-3, window_stride=10e-3, augmentation=None): """Function to convert raw audio signal to numpy array of features. Backend: python_speech_features Args: signal (np.array): np.array containing raw audio signal. sample_freq (float): frames per second. num_features (int): number of speech features in frequency domain. pad_to (int): if specified, the length will be padded to become divisible by ``pad_to`` parameter. features_type (string): 'mfcc' or 'spectrogram'. window_size (float): size of analysis window in milli-seconds. window_stride (float): stride of analysis window in milli-seconds. augmentation (dict, optional): dictionary of augmentation parameters. See :func:`augment_audio_signal` for specification and example. apply_window (bool): whether to apply Hann window for mfcc and logfbank. python_speech_features version should accept winfunc if it is True. Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. 
audio_duration (float): duration of the signal in seconds """ if augmentation is not None: signal = augment_audio_signal(signal, sample_freq, augmentation) else: signal = (normalize_signal(signal.astype(np.float32)) * 32767.0).astype( np.int16) audio_duration = len(signal) * 1.0 / sample_freq n_window_size = int(sample_freq * window_size) n_window_stride = int(sample_freq * window_stride) # making sure length of the audio is divisible by 8 (fp16 optimization) length = 1 + int(math.ceil( (1.0 * signal.shape[0] - n_window_size) / n_window_stride )) if pad_to > 0: if length % pad_to != 0: pad_size = (pad_to - length % pad_to) * n_window_stride signal = np.pad(signal, (0, pad_size), mode='constant') if features_type == 'spectrogram': frames = psf.sigproc.framesig(sig=signal, frame_len=n_window_size, frame_step=n_window_stride, winfunc=np.hanning) # features = np.log1p(psf.sigproc.powspec(frames, NFFT=N_window_size)) features = psf.sigproc.logpowspec(frames, NFFT=n_window_size) assert num_features <= n_window_size // 2 + 1, \ "num_features for spectrogram should be <= (sample_freq * window_size // 2 + 1)" # cut high frequency part features = features[:, :num_features] elif features_type == 'mfcc': features = psf.mfcc(signal=signal, samplerate=sample_freq, winlen=window_size, winstep=window_stride, numcep=num_features, nfilt=2 * num_features, nfft=512, lowfreq=0, highfreq=None, preemph=0.97, ceplifter=2 * num_features, appendEnergy=False) elif features_type == 'logfbank': features = psf.logfbank(signal=signal, samplerate=sample_freq, winlen=window_size, winstep=window_stride, nfilt=num_features, nfft=512, lowfreq=0, highfreq=sample_freq / 2, preemph=0.97) else: raise ValueError('Unknown features type: {}'.format(features_type)) if pad_to > 0: assert features.shape[0] % pad_to == 0 mean = np.mean(features) std_dev = np.std(features) features = (features - mean) / std_dev return features, audio_duration
# Copyright (c) 2018 NVIDIA Corporation from __future__ import absolute_import, division, print_function from __future__ import unicode_literals import math import os import h5py import numpy as np import resampy as rs import scipy.io.wavfile as wave BACKENDS = [] try: import python_speech_features as psf BACKENDS.append('psf') except ImportError: pass try: import librosa BACKENDS.append('librosa') except ImportError: pass WINDOWS_FNS = {"hanning": np.hanning, "hamming": np.hamming, "none": None} class PreprocessOnTheFlyException(Exception): """ Exception that is thrown to not load preprocessed features from disk; recompute on-the-fly. This saves disk space (if you're experimenting with data input formats/preprocessing) but can be slower. The slowdown is especially apparent for small, fast NNs.""" pass class RegenerateCacheException(Exception): """ Exception that is thrown to force recomputation of (preprocessed) features """ pass def load_features(path, data_format): """ Function to load (preprocessed) features from disk Args: :param path: the path where the features are stored :param data_format: the format in which the features are stored :return: tuple of (features, duration) """ if data_format == 'hdf5': with h5py.File(path + '.hdf5', "r") as hf5_file: features = hf5_file["features"][:] duration = hf5_file["features"].attrs["duration"] elif data_format == 'npy': features, duration = np.load(path + '.npy') elif data_format == 'npz': data = np.load(path + '.npz') features = data['features'] duration = data['duration'] else: raise ValueError("Invalid data format for caching: ", data_format, "!\n", "options: hdf5, npy, npz") return features, duration def save_features(features, duration, path, data_format, verbose=False): """ Function to save (preprocessed) features to disk Args: :param features: features :param duration: metadata: duration in seconds of audio file :param path: path to store the data :param data_format: format to store the data in ('npy', 'npz', 
'hdf5') """ if verbose: print("Saving to: ", path) if data_format == 'hdf5': with h5py.File(path + '.hdf5', "w") as hf5_file: dset = hf5_file.create_dataset("features", data=features) dset.attrs["duration"] = duration elif data_format == 'npy': np.save(path + '.npy', [features, duration]) elif data_format == 'npz': np.savez(path + '.npz', features=features, duration=duration) else: raise ValueError("Invalid data format for caching: ", data_format, "!\n", "options: hdf5, npy, npz") def get_preprocessed_data_path(filename, params): """ Function to convert the audio path into the path to the preprocessed version of this audio Args: :param filename: WAVE filename :param params: dictionary containing preprocessing parameters :return: path to new file (without extension). The path is generated from the relevant preprocessing parameters. """ if isinstance(filename, bytes): # convert binary string to normal string filename = filename.decode('ascii') filename = os.path.realpath(filename) # decode symbolic links ## filter relevant parameters # TODO is there a cleaner way of doing this? # print(list(params.keys())) ignored_params = ["cache_features", "cache_format", "cache_regenerate", "vocab_file", "dataset_files", "shuffle", "batch_size", "max_duration", "mode", "interactive", "autoregressive", "char2idx", "tgt_vocab_size", "idx2char", "dtype"] def fix_kv(text): """ Helper function to shorten length of filenames to get around filesystem path length limitations""" text = str(text) text = text.replace("speed_perturbation_ratio", "sp") \ .replace("noise_level_min", "nlmin", ) \ .replace("noise_level_max", "nlmax") \ .replace("add_derivatives", "d") \ .replace("add_second_derivatives", "dd") return text # generate the identifier by simply concatenating preprocessing key-value # pairs as strings. 
preprocess_id = "-".join( [fix_kv(k) + "_" + fix_kv(v) for k, v in params.items() if k not in ignored_params]) preprocessed_dir = os.path.dirname(filename).replace("wav", "preprocessed-" + preprocess_id) preprocessed_path = os.path.join(preprocessed_dir, os.path.basename(filename).replace(".wav", "")) # create dir if it doesn't exist yet if not os.path.exists(preprocessed_dir): os.makedirs(preprocessed_dir) return preprocessed_path def get_speech_features_from_file(filename, params): """Function to get a numpy array of features, from an audio file. if params['cache_features']==True, try load preprocessed data from disk, or store after preprocesseng. else, perform preprocessing on-the-fly. Args: filename (string): WAVE filename. params (dict): the following parameters num_features (int): number of speech features in frequency domain. features_type (string): 'mfcc' or 'spectrogram'. window_size (float): size of analysis window in milli-seconds. window_stride (float): stride of analysis window in milli-seconds. augmentation (dict, optional): dictionary of augmentation parameters. See :func:`augment_audio_signal` for specification and example. window (str): window function to apply dither (float): weight of Gaussian noise to apply to input signal for dithering/preventing quantization noise num_fft (int): size of fft window to use if features require fft, defaults to smallest power of 2 larger than window size norm_per_feature (bool): if True, the output features will be normalized (whitened) individually. if False, a global mean/std over all features will be used for normalization Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. 
""" cache_features = params.get('cache_features', False) cache_format = params.get('cache_format', 'hdf5') cache_regenerate = params.get('cache_regenerate', False) try: if not cache_features: raise PreprocessOnTheFlyException( "on-the-fly preprocessing enforced with 'cache_features'==True") if cache_regenerate: raise RegenerateCacheException("regenerating cache...") preprocessed_data_path = get_preprocessed_data_path(filename, params) features, duration = load_features(preprocessed_data_path, data_format=cache_format) except PreprocessOnTheFlyException: sample_freq, signal = wave.read(filename) features, duration = get_speech_features(signal, sample_freq, params) except (OSError, FileNotFoundError, RegenerateCacheException): sample_freq, signal = wave.read(filename) features, duration = get_speech_features(signal, sample_freq, params) preprocessed_data_path = get_preprocessed_data_path(filename, params) save_features(features, duration, preprocessed_data_path, data_format=cache_format) return features, duration def normalize_signal(signal): """ Normalize float32 signal to [-1, 1] range """ return signal / (np.max(np.abs(signal)) + 1e-5) def augment_audio_signal(signal, sample_freq, augmentation): """Function that performs audio signal augmentation. Args: signal (np.array): np.array containing raw audio signal. sample_freq (float): frames per second. augmentation (dict, optional): None or dictionary of augmentation parameters. If not None, has to have 'speed_perturbation_ratio', 'noise_level_min', or 'noise_level_max' fields, e.g.:: augmentation={ 'speed_perturbation_ratio': 0.2, 'noise_level_min': -90, 'noise_level_max': -46, } 'speed_perturbation_ratio' can either be a list of possible speed perturbation factors or a float. If float, a random value from U[1-speed_perturbation_ratio, 1+speed_perturbation_ratio]. Returns: np.array: np.array with augmented audio signal. 
""" signal_float = normalize_signal(signal.astype(np.float32)) if 'speed_perturbation_ratio' in augmentation: stretch_amount = -1 if isinstance(augmentation['speed_perturbation_ratio'], list): stretch_amount = np.random.choice(augmentation['speed_perturbation_ratio']) elif augmentation['speed_perturbation_ratio'] > 0: # time stretch (might be slow) stretch_amount = 1.0 + (2.0 * np.random.rand() - 1.0) * \ augmentation['speed_perturbation_ratio'] if stretch_amount > 0: signal_float = rs.resample( signal_float, sample_freq, int(sample_freq * stretch_amount), filter='kaiser_best', ) # noise if 'noise_level_min' in augmentation and 'noise_level_max' in augmentation: noise_level_db = np.random.randint(low=augmentation['noise_level_min'], high=augmentation['noise_level_max']) signal_float += np.random.randn(signal_float.shape[0]) * \ 10.0 ** (noise_level_db / 20.0) return normalize_signal(signal_float) def preemphasis(signal, coeff=0.97): return np.append(signal[0], signal[1:] - coeff * signal[:-1]) def get_speech_features(signal, sample_freq, params): """ Get speech features using either librosa (recommended) or python_speech_features Args: signal (np.array): np.array containing raw audio signal sample_freq (float): sample rate of the signal params (dict): parameters of pre-processing Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. 
audio_duration (float): duration of the signal in seconds """ backend = params.get('backend', 'psf') features_type = params.get('input_type', 'spectrogram') num_features = params['num_audio_features'] window_size = params.get('window_size', 20e-3) window_stride = params.get('window_stride', 10e-3) augmentation = params.get('augmentation', None) if backend == 'librosa': window_fn = WINDOWS_FNS[params.get('window', "hanning")] dither = params.get('dither', 0.0) num_fft = params.get('num_fft', None) norm_per_feature = params.get('norm_per_feature', False) mel_basis = params.get('mel_basis', None) if mel_basis is not None and sample_freq != params["sample_freq"]: raise ValueError( ("The sampling frequency set in params {} does not match the " "frequency {} read from file {}").format(params["sample_freq"], sample_freq, filename) ) features, duration = get_speech_features_librosa( signal, sample_freq, num_features, features_type, window_size, window_stride, augmentation, window_fn=window_fn, dither=dither, norm_per_feature=norm_per_feature, num_fft=num_fft, mel_basis=mel_basis ) else: pad_to = params.get('pad_to', 8) features, duration = get_speech_features_psf( signal, sample_freq, num_features, pad_to, features_type, window_size, window_stride, augmentation ) return features, duration def get_speech_features_librosa(signal, sample_freq, num_features, features_type='spectrogram', window_size=20e-3, window_stride=10e-3, augmentation=None, window_fn=np.hanning, num_fft=None, dither=0.0, norm_per_feature=False, mel_basis=None): """Function to convert raw audio signal to numpy array of features. Backend: librosa Args: signal (np.array): np.array containing raw audio signal. sample_freq (float): frames per second. num_features (int): number of speech features in frequency domain. pad_to (int): if specified, the length will be padded to become divisible by ``pad_to`` parameter. features_type (string): 'mfcc' or 'spectrogram'. 
window_size (float): size of analysis window in milli-seconds. window_stride (float): stride of analysis window in milli-seconds. augmentation (dict, optional): dictionary of augmentation parameters. See :func:`augment_audio_signal` for specification and example. Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. audio_duration (float): duration of the signal in seconds """ if augmentation: signal = augment_audio_signal(signal.astype(np.float32), sample_freq, augmentation) else: signal = normalize_signal(signal.astype(np.float32)) audio_duration = len(signal) * 1.0 / sample_freq n_window_size = int(sample_freq * window_size) n_window_stride = int(sample_freq * window_stride) num_fft = num_fft or 2**math.ceil(math.log2(window_size*sample_freq)) if dither > 0: signal += dither*np.random.randn(*signal.shape) if features_type == 'spectrogram': # ignore 1/n_fft multiplier, since there is a post-normalization powspec = np.square(np.abs(librosa.core.stft( signal, n_fft=n_window_size, hop_length=n_window_stride, win_length=n_window_size, center=True, window=window_fn))) # remove small bins powspec[powspec <= 1e-30] = 1e-30 features = 10 * np.log10(powspec.T) assert num_features <= n_window_size // 2 + 1, \ "num_features for spectrogram should be <= (sample_freq * window_size // 2 + 1)" # cut high frequency part features = features[:, :num_features] elif features_type == 'mfcc': signal = preemphasis(signal, coeff=0.97) S = np.square( np.abs( librosa.core.stft(signal, n_fft=num_fft, hop_length=int(window_stride * sample_freq), win_length=int(window_size * sample_freq), center=True, window=window_fn ) ) ) features = librosa.feature.mfcc(sr=sample_freq, S=S, n_mfcc=num_features, n_mels=2*num_features).T elif features_type == 'logfbank': signal = preemphasis(signal,coeff=0.97) S = np.abs(librosa.core.stft(signal, n_fft=num_fft, hop_length=int(window_stride * sample_freq), win_length=int(window_size * sample_freq), center=True, 
window=window_fn))**2.0 if mel_basis is None: # Build a Mel filter mel_basis = librosa.filters.mel(sample_freq, num_fft, n_mels=num_features, fmin=0, fmax=int(sample_freq/2)) features = np.log(np.dot(mel_basis, S) + 1e-20).T else: raise ValueError('Unknown features type: {}'.format(features_type)) norm_axis = 0 if norm_per_feature else None mean = np.mean(features, axis=norm_axis) std_dev = np.std(features, axis=norm_axis) features = (features - mean) / std_dev # now it is safe to pad # if pad_to > 0: # if features.shape[0] % pad_to != 0: # pad_size = pad_to - features.shape[0] % pad_to # if pad_size != 0: # features = np.pad(features, ((0,pad_size), (0,0)), mode='constant') return features, audio_duration def get_speech_features_psf(signal, sample_freq, num_features, pad_to=8, features_type='spectrogram', window_size=20e-3, window_stride=10e-3, augmentation=None): """Function to convert raw audio signal to numpy array of features. Backend: python_speech_features Args: signal (np.array): np.array containing raw audio signal. sample_freq (float): frames per second. num_features (int): number of speech features in frequency domain. pad_to (int): if specified, the length will be padded to become divisible by ``pad_to`` parameter. features_type (string): 'mfcc' or 'spectrogram'. window_size (float): size of analysis window in milli-seconds. window_stride (float): stride of analysis window in milli-seconds. augmentation (dict, optional): dictionary of augmentation parameters. See :func:`augment_audio_signal` for specification and example. apply_window (bool): whether to apply Hann window for mfcc and logfbank. python_speech_features version should accept winfunc if it is True. Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. 
audio_duration (float): duration of the signal in seconds """ if augmentation is not None: signal = augment_audio_signal(signal, sample_freq, augmentation) else: signal = (normalize_signal(signal.astype(np.float32)) * 32767.0).astype( np.int16) audio_duration = len(signal) * 1.0 / sample_freq n_window_size = int(sample_freq * window_size) n_window_stride = int(sample_freq * window_stride) # making sure length of the audio is divisible by 8 (fp16 optimization) length = 1 + int(math.ceil( (1.0 * signal.shape[0] - n_window_size) / n_window_stride )) if pad_to > 0: if length % pad_to != 0: pad_size = (pad_to - length % pad_to) * n_window_stride signal = np.pad(signal, (0, pad_size), mode='constant') if features_type == 'spectrogram': frames = psf.sigproc.framesig(sig=signal, frame_len=n_window_size, frame_step=n_window_stride, winfunc=np.hanning) # features = np.log1p(psf.sigproc.powspec(frames, NFFT=N_window_size)) features = psf.sigproc.logpowspec(frames, NFFT=n_window_size) assert num_features <= n_window_size // 2 + 1, \ "num_features for spectrogram should be <= (sample_freq * window_size // 2 + 1)" # cut high frequency part features = features[:, :num_features] elif features_type == 'mfcc': features = psf.mfcc(signal=signal, samplerate=sample_freq, winlen=window_size, winstep=window_stride, numcep=num_features, nfilt=2 * num_features, nfft=512, lowfreq=0, highfreq=None, preemph=0.97, ceplifter=2 * num_features, appendEnergy=False) elif features_type == 'logfbank': features = psf.logfbank(signal=signal, samplerate=sample_freq, winlen=window_size, winstep=window_stride, nfilt=num_features, nfft=512, lowfreq=0, highfreq=sample_freq / 2, preemph=0.97) else: raise ValueError('Unknown features type: {}'.format(features_type)) if pad_to > 0: assert features.shape[0] % pad_to == 0 mean = np.mean(features) std_dev = np.std(features) features = (features - mean) / std_dev return features, audio_duration
en
0.685036
# Copyright (c) 2018 NVIDIA Corporation Exception that is thrown to not load preprocessed features from disk; recompute on-the-fly. This saves disk space (if you're experimenting with data input formats/preprocessing) but can be slower. The slowdown is especially apparent for small, fast NNs. Exception that is thrown to force recomputation of (preprocessed) features Function to load (preprocessed) features from disk Args: :param path: the path where the features are stored :param data_format: the format in which the features are stored :return: tuple of (features, duration) Function to save (preprocessed) features to disk Args: :param features: features :param duration: metadata: duration in seconds of audio file :param path: path to store the data :param data_format: format to store the data in ('npy', 'npz', 'hdf5') Function to convert the audio path into the path to the preprocessed version of this audio Args: :param filename: WAVE filename :param params: dictionary containing preprocessing parameters :return: path to new file (without extension). The path is generated from the relevant preprocessing parameters. # convert binary string to normal string # decode symbolic links ## filter relevant parameters # TODO is there a cleaner way of doing this? # print(list(params.keys())) Helper function to shorten length of filenames to get around filesystem path length limitations # generate the identifier by simply concatenating preprocessing key-value # pairs as strings. # create dir if it doesn't exist yet Function to get a numpy array of features, from an audio file. if params['cache_features']==True, try load preprocessed data from disk, or store after preprocesseng. else, perform preprocessing on-the-fly. Args: filename (string): WAVE filename. params (dict): the following parameters num_features (int): number of speech features in frequency domain. features_type (string): 'mfcc' or 'spectrogram'. window_size (float): size of analysis window in milli-seconds. 
window_stride (float): stride of analysis window in milli-seconds. augmentation (dict, optional): dictionary of augmentation parameters. See :func:`augment_audio_signal` for specification and example. window (str): window function to apply dither (float): weight of Gaussian noise to apply to input signal for dithering/preventing quantization noise num_fft (int): size of fft window to use if features require fft, defaults to smallest power of 2 larger than window size norm_per_feature (bool): if True, the output features will be normalized (whitened) individually. if False, a global mean/std over all features will be used for normalization Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. Normalize float32 signal to [-1, 1] range Function that performs audio signal augmentation. Args: signal (np.array): np.array containing raw audio signal. sample_freq (float): frames per second. augmentation (dict, optional): None or dictionary of augmentation parameters. If not None, has to have 'speed_perturbation_ratio', 'noise_level_min', or 'noise_level_max' fields, e.g.:: augmentation={ 'speed_perturbation_ratio': 0.2, 'noise_level_min': -90, 'noise_level_max': -46, } 'speed_perturbation_ratio' can either be a list of possible speed perturbation factors or a float. If float, a random value from U[1-speed_perturbation_ratio, 1+speed_perturbation_ratio]. Returns: np.array: np.array with augmented audio signal. # time stretch (might be slow) # noise Get speech features using either librosa (recommended) or python_speech_features Args: signal (np.array): np.array containing raw audio signal sample_freq (float): sample rate of the signal params (dict): parameters of pre-processing Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. audio_duration (float): duration of the signal in seconds Function to convert raw audio signal to numpy array of features. 
Backend: librosa Args: signal (np.array): np.array containing raw audio signal. sample_freq (float): frames per second. num_features (int): number of speech features in frequency domain. pad_to (int): if specified, the length will be padded to become divisible by ``pad_to`` parameter. features_type (string): 'mfcc' or 'spectrogram'. window_size (float): size of analysis window in milli-seconds. window_stride (float): stride of analysis window in milli-seconds. augmentation (dict, optional): dictionary of augmentation parameters. See :func:`augment_audio_signal` for specification and example. Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. audio_duration (float): duration of the signal in seconds # ignore 1/n_fft multiplier, since there is a post-normalization # remove small bins # cut high frequency part # Build a Mel filter # now it is safe to pad # if pad_to > 0: # if features.shape[0] % pad_to != 0: # pad_size = pad_to - features.shape[0] % pad_to # if pad_size != 0: # features = np.pad(features, ((0,pad_size), (0,0)), mode='constant') Function to convert raw audio signal to numpy array of features. Backend: python_speech_features Args: signal (np.array): np.array containing raw audio signal. sample_freq (float): frames per second. num_features (int): number of speech features in frequency domain. pad_to (int): if specified, the length will be padded to become divisible by ``pad_to`` parameter. features_type (string): 'mfcc' or 'spectrogram'. window_size (float): size of analysis window in milli-seconds. window_stride (float): stride of analysis window in milli-seconds. augmentation (dict, optional): dictionary of augmentation parameters. See :func:`augment_audio_signal` for specification and example. apply_window (bool): whether to apply Hann window for mfcc and logfbank. python_speech_features version should accept winfunc if it is True. 
Returns: np.array: np.array of audio features with shape=[num_time_steps, num_features]. audio_duration (float): duration of the signal in seconds # making sure length of the audio is divisible by 8 (fp16 optimization) # features = np.log1p(psf.sigproc.powspec(frames, NFFT=N_window_size)) # cut high frequency part
2.663194
3
src/fakeformat/elements.py
roelandschoukens/fake-format-ml
0
6629776
from collections import OrderedDict from . import regexshim as _re import sys """ tags which we consider empty. This is largely the same as HTML but includes a few more. """ _EMPTY_TAGS = ( 'area', 'base', 'br', 'col', 'command', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'meta', 'param', 'source', 'track', 'wbr', 'notelink') """ This matches a 'token' which may be delimited by spaces. Exclude spaces, quotes and tag metacharacters. """ RE_TOKEN = r'[\w\p{Pd}\p{Pc}]+' if _re.has_regex else r'[\w\-]+' PAT_TOKEN = _re.compile(RE_TOKEN) """ matches 1 or more 'blank' characters """ PAT_SPACE = _re.compile(r'\p{Z}+') """ matches 1 or more spaces """ PAT_WHITESPACE = _re.compile(r'\p{Zs}+') """ matches 1 or more non-space characters """ PAT_NONSPACE = _re.compile(r'\P{Z}+') # the pattern below has the convenient property that it always yields a valid match. """ matches 0 or more spaces """ PAT_WHITESPACE_0 = _re.compile(r'\p{Zs}*') """ matches exactly 1 non-space characters """ PAT_NONSPACE_1 = _re.compile(r'\P{Z}') PAT_CMM_PUNCT = _re.compile(r'''[!"#$%&'()*+,\-./:;<=>?@[\]\^_`{|}~\p{Pc}\p{Pd}\p{Pe}\p{Pf}\p{Pi}\p{Po}\p{Ps}]''') def match_length(content, pos, pat): m = pat.match(content, pos) return len(m.group()) if m else 0 class Sentinel: """ simple class to construct arbitrary named constants. Use the 'is' operator to check """ def __init__(self, s): self.s = '['+s+']' def __str__(self): return self.s def sentinels(*args): return map(Sentinel, args) class FFElement: def __init__(self, arg): """ Make an element FFElement(tag_name): make a new element, with the given tag name and empty elements FFElement(element): make an independent copy of the given element. 
""" # handle copy case el, tag = None, None if isinstance(arg, FFElement): el = arg arg = None elif arg: tag = str(arg) """type of block""" self.tag = tag """ ID attribute """ self.id = None """ class attribute (as list) """ self.classes = [] """ rest of attributes as map """ self.attr = OrderedDict() """ content type """ self.verbatim = False """ is this a block or not """ self.is_block = False if el: self.merge(el) self.is_block = el.is_block def is_void(self): """ Return if this element has not tag name or attributes """ return not self.tag and not self.id and not self.classes and not self.attr def clear(self): """ Clear this tag, i.e. make self.is_void() True. """ self.tag = None; self.attr = OrderedDict() self.id = None self.classes = [] def is_empty_tag(self): return self.tag in _EMPTY_TAGS or (self.tag is None and 'src' in self.attr) def merge(self, other): """ merge attributes from other. This will merge classes and attributes from other. If given the other tag name and ID will replace those of this element. other is normally an element derived from an explicit tag """ if other.tag: self.tag = other.tag if other.id: self.id = other.id self.classes += other.classes self.attr.update(other.attr) def __str__(self): s = [] if self.tag: s.append(self.tag) if self.id: s.append('#' + self.id) for c in self.classes: s.append('.' + c) return '{' + ' '.join(s) + '}' def __repr__(self): s = [] if self.tag: s.append(self.tag) if self.id: s.append('#' + self.id) for c in self.classes: s.append('.' 
+ c) for k, v in self.attr.items(): s.append(k + '="' + str(v) + '"') return type(self).__name__ + '(' + ' '.join(s) + ')' class FFBlock(FFElement): def __init__(self, tag, indent=''): super().__init__(tag) """ indent """ self.indent = indent """ indent, with spaces only """ if indent is not None: self.space_indent = PAT_NONSPACE_1.sub(' ', indent) """ if True any text must be enclosed in a P """ self.must_have_p = True """ is this a block or not """ self.is_block = True def set_indent(self, indent, space_indent): """ Update indent. This always updates indent and space_indent in tandem """ self.indent = indent self.space_indent = space_indent def match_blank(self, l): """ Check if this line is considered whitespace """ assert self.indent is not None, 'Oops, indent is None (tag ' + self.tag + ' and line '+l+')' return self.indent.startswith(l) or self.space_indent.startswith(l) def match_indent(self, l): """Check if this line starts with our indent. If not this block is terminated""" return l.startswith(self.indent) or l.startswith(self.space_indent) def match_indent_prefix(self, l, prefix): """Check if this line starts with our indent, plus an extra prefix. If not this block is terminated""" return l.startswith(self.indent + prefix) or l.startswith(self.space_indent + prefix) def strip_indent(self, l): """Strip the indent off the given line. This happens blindly, so call match_indent first """ return l[len(self.indent):] # def parse_hook(self, parser, line) # if present, this will be called before the usual parsing of a line. # l shall not have any whitespace stripped, so often you actually need self.strip_indent(line) # returns: true if this line was consumed, false if normal parsing will happen # If defined this is called for any line, including blanks, unless a block higher in the stack # consumes the line in its own parse_hook routine. 
class FF_Root(FFBlock): def __init__(self): super().__init__('fakeformat', '') def match_indent(self, l): return True class FF_P(FFBlock): def __init__(self, indent): super().__init__('p', indent) self.must_have_p = False
from collections import OrderedDict from . import regexshim as _re import sys """ tags which we consider empty. This is largely the same as HTML but includes a few more. """ _EMPTY_TAGS = ( 'area', 'base', 'br', 'col', 'command', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'meta', 'param', 'source', 'track', 'wbr', 'notelink') """ This matches a 'token' which may be delimited by spaces. Exclude spaces, quotes and tag metacharacters. """ RE_TOKEN = r'[\w\p{Pd}\p{Pc}]+' if _re.has_regex else r'[\w\-]+' PAT_TOKEN = _re.compile(RE_TOKEN) """ matches 1 or more 'blank' characters """ PAT_SPACE = _re.compile(r'\p{Z}+') """ matches 1 or more spaces """ PAT_WHITESPACE = _re.compile(r'\p{Zs}+') """ matches 1 or more non-space characters """ PAT_NONSPACE = _re.compile(r'\P{Z}+') # the pattern below has the convenient property that it always yields a valid match. """ matches 0 or more spaces """ PAT_WHITESPACE_0 = _re.compile(r'\p{Zs}*') """ matches exactly 1 non-space characters """ PAT_NONSPACE_1 = _re.compile(r'\P{Z}') PAT_CMM_PUNCT = _re.compile(r'''[!"#$%&'()*+,\-./:;<=>?@[\]\^_`{|}~\p{Pc}\p{Pd}\p{Pe}\p{Pf}\p{Pi}\p{Po}\p{Ps}]''') def match_length(content, pos, pat): m = pat.match(content, pos) return len(m.group()) if m else 0 class Sentinel: """ simple class to construct arbitrary named constants. Use the 'is' operator to check """ def __init__(self, s): self.s = '['+s+']' def __str__(self): return self.s def sentinels(*args): return map(Sentinel, args) class FFElement: def __init__(self, arg): """ Make an element FFElement(tag_name): make a new element, with the given tag name and empty elements FFElement(element): make an independent copy of the given element. 
""" # handle copy case el, tag = None, None if isinstance(arg, FFElement): el = arg arg = None elif arg: tag = str(arg) """type of block""" self.tag = tag """ ID attribute """ self.id = None """ class attribute (as list) """ self.classes = [] """ rest of attributes as map """ self.attr = OrderedDict() """ content type """ self.verbatim = False """ is this a block or not """ self.is_block = False if el: self.merge(el) self.is_block = el.is_block def is_void(self): """ Return if this element has not tag name or attributes """ return not self.tag and not self.id and not self.classes and not self.attr def clear(self): """ Clear this tag, i.e. make self.is_void() True. """ self.tag = None; self.attr = OrderedDict() self.id = None self.classes = [] def is_empty_tag(self): return self.tag in _EMPTY_TAGS or (self.tag is None and 'src' in self.attr) def merge(self, other): """ merge attributes from other. This will merge classes and attributes from other. If given the other tag name and ID will replace those of this element. other is normally an element derived from an explicit tag """ if other.tag: self.tag = other.tag if other.id: self.id = other.id self.classes += other.classes self.attr.update(other.attr) def __str__(self): s = [] if self.tag: s.append(self.tag) if self.id: s.append('#' + self.id) for c in self.classes: s.append('.' + c) return '{' + ' '.join(s) + '}' def __repr__(self): s = [] if self.tag: s.append(self.tag) if self.id: s.append('#' + self.id) for c in self.classes: s.append('.' 
+ c) for k, v in self.attr.items(): s.append(k + '="' + str(v) + '"') return type(self).__name__ + '(' + ' '.join(s) + ')' class FFBlock(FFElement): def __init__(self, tag, indent=''): super().__init__(tag) """ indent """ self.indent = indent """ indent, with spaces only """ if indent is not None: self.space_indent = PAT_NONSPACE_1.sub(' ', indent) """ if True any text must be enclosed in a P """ self.must_have_p = True """ is this a block or not """ self.is_block = True def set_indent(self, indent, space_indent): """ Update indent. This always updates indent and space_indent in tandem """ self.indent = indent self.space_indent = space_indent def match_blank(self, l): """ Check if this line is considered whitespace """ assert self.indent is not None, 'Oops, indent is None (tag ' + self.tag + ' and line '+l+')' return self.indent.startswith(l) or self.space_indent.startswith(l) def match_indent(self, l): """Check if this line starts with our indent. If not this block is terminated""" return l.startswith(self.indent) or l.startswith(self.space_indent) def match_indent_prefix(self, l, prefix): """Check if this line starts with our indent, plus an extra prefix. If not this block is terminated""" return l.startswith(self.indent + prefix) or l.startswith(self.space_indent + prefix) def strip_indent(self, l): """Strip the indent off the given line. This happens blindly, so call match_indent first """ return l[len(self.indent):] # def parse_hook(self, parser, line) # if present, this will be called before the usual parsing of a line. # l shall not have any whitespace stripped, so often you actually need self.strip_indent(line) # returns: true if this line was consumed, false if normal parsing will happen # If defined this is called for any line, including blanks, unless a block higher in the stack # consumes the line in its own parse_hook routine. 
class FF_Root(FFBlock): def __init__(self): super().__init__('fakeformat', '') def match_indent(self, l): return True class FF_P(FFBlock): def __init__(self, indent): super().__init__('p', indent) self.must_have_p = False
en
0.813697
tags which we consider empty. This is largely the same as HTML but includes a few more. This matches a 'token' which may be delimited by spaces. Exclude spaces, quotes and tag metacharacters. matches 1 or more 'blank' characters matches 1 or more spaces matches 1 or more non-space characters # the pattern below has the convenient property that it always yields a valid match. matches 0 or more spaces matches exactly 1 non-space characters [!"#$%&'()*+,\-./:;<=>?@[\]\^_`{|}~\p{Pc}\p{Pd}\p{Pe}\p{Pf}\p{Pi}\p{Po}\p{Ps}] simple class to construct arbitrary named constants. Use the 'is' operator to check Make an element FFElement(tag_name): make a new element, with the given tag name and empty elements FFElement(element): make an independent copy of the given element. # handle copy case type of block ID attribute class attribute (as list) rest of attributes as map content type is this a block or not Return if this element has not tag name or attributes Clear this tag, i.e. make self.is_void() True. merge attributes from other. This will merge classes and attributes from other. If given the other tag name and ID will replace those of this element. other is normally an element derived from an explicit tag indent indent, with spaces only if True any text must be enclosed in a P is this a block or not Update indent. This always updates indent and space_indent in tandem Check if this line is considered whitespace Check if this line starts with our indent. If not this block is terminated Check if this line starts with our indent, plus an extra prefix. If not this block is terminated Strip the indent off the given line. This happens blindly, so call match_indent first # def parse_hook(self, parser, line) # if present, this will be called before the usual parsing of a line. 
# l shall not have any whitespace stripped, so often you actually need self.strip_indent(line) # returns: true if this line was consumed, false if normal parsing will happen # If defined this is called for any line, including blanks, unless a block higher in the stack # consumes the line in its own parse_hook routine.
3.05339
3
oppertions/08.py
mallimuondu/python-practice
0
6629777
i=1 while i<6 print(i) i += 1
i=1 while i<6 print(i) i += 1
none
1
2.927403
3
django_tenants_q/custom.py
chaitanyadevle/django_tenants_q
0
6629778
# local from django_tenants_q.utils import QUtilities from django_q.conf import Conf from django_q.brokers import get_broker class Iter(object): """ An async task with iterable arguments customised for django_tenants_q """ def __init__( self, func=None, args=None, kwargs=None, cached=Conf.CACHED, sync=Conf.SYNC, broker=None, ): self.func = func self.args = args or [] self.kwargs = kwargs or {} self.id = "" self.broker = broker or get_broker() self.cached = cached self.sync = sync self.started = False def append(self, *args): """ add arguments to the set """ self.args.append(args) if self.started: self.started = False return self.length() def run(self): """ Start queueing the tasks to the worker cluster :return: the task id """ self.kwargs["cached"] = self.cached self.kwargs["sync"] = self.sync self.kwargs["broker"] = self.broker self.id = QUtilities.add_async_tasks_from_iter(self.func, self.args, **self.kwargs) self.started = True return self.id def result(self, wait=0): """ return the full list of results. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of results """ if self.started: return QUtilities.get_result(self.id, wait=wait, cached=self.cached) def fetch(self, wait=0): """ get the task result objects. 
:param int wait: how many milliseconds to wait for a result :return: an unsorted list of task objects """ if self.started: return QUtilities.fetch_task(self.id, wait=wait, cached=self.cached) def length(self): """ get the length of the arguments list :return int: length of the argument list """ return len(self.args) class Chain(object): """ A sequential chain of tasks """ def __init__(self, chain=None, group=None, cached=Conf.CACHED, sync=Conf.SYNC): self.chain = chain or [] self.group = group or "" self.broker = get_broker() self.cached = cached self.sync = sync self.started = False def append(self, func, *args, **kwargs): """ add a task to the chain takes the same parameters as async_task() """ self.chain.append((func, args, kwargs)) # remove existing results if self.started: QUtilities.delete_task_group(self.group) self.started = False return self.length() def run(self): """ Start queueing the chain to the worker cluster :return: the chain's group id """ self.group = QUtilities.create_async_tasks_chain( chain=self.chain[:], group=self.group, cached=self.cached, sync=self.sync, broker=self.broker, ) self.started = True return self.group def result(self, wait=0): """ return the full list of results from the chain when it finishes. blocks until timeout. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of results """ if self.started: return QUtilities.get_result_group( self.group, wait=wait, count=self.length(), cached=self.cached ) def fetch(self, failures=True, wait=0): """ get the task result objects from the chain when it finishes. blocks until timeout. 
:param failures: include failed tasks :param int wait: how many milliseconds to wait for a result :return: an unsorted list of task objects """ if self.started: return QUtilities.fetch_task_group( self.group, failures=failures, wait=wait, count=self.length(), cached=self.cached, ) def current(self): """ get the index of the currently executing chain element :return int: current chain index """ if not self.started: return None return QUtilities.get_group_count(self.group, cached=self.cached) def length(self): """ get the length of the chain :return int: length of the chain """ return len(self.chain) class AsyncTask(object): """ an async task """ def __init__(self, func, *args, **kwargs): self.id = "" self.started = False self.func = func self.args = args self.kwargs = kwargs @property def broker(self): return self._get_option("broker", None) @broker.setter def broker(self, value): self._set_option("broker", value) @property def sync(self): return self._get_option("sync", None) @sync.setter def sync(self, value): self._set_option("sync", value) @property def save(self): return self._get_option("save", None) @save.setter def save(self, value): self._set_option("save", value) @property def hook(self): return self._get_option("hook", None) @hook.setter def hook(self, value): self._set_option("hook", value) @property def group(self): return self._get_option("group", None) @group.setter def group(self, value): self._set_option("group", value) @property def cached(self): return self._get_option("cached", Conf.CACHED) @cached.setter def cached(self, value): self._set_option("cached", value) def _set_option(self, key, value): if "q_options" in self.kwargs: self.kwargs["q_options"][key] = value else: self.kwargs[key] = value self.started = False def _get_option(self, key, default=None): if "q_options" in self.kwargs: return self.kwargs["q_options"].get(key, default) else: return self.kwargs.get(key, default) def run(self): self.id = QUtilities.add_async_task(self.func, 
*self.args, **self.kwargs) self.started = True return self.id def result(self, wait=0): if self.started: return QUtilities.get_result(self.id, wait=wait, cached=self.cached) def fetch(self, wait=0): if self.started: return QUtilities.fetch_task(self.id, wait=wait, cached=self.cached) def result_group(self, failures=False, wait=0, count=None): if self.started and self.group: return QUtilities.get_result_group( self.group, failures=failures, wait=wait, count=count, cached=self.cached, ) def fetch_group(self, failures=True, wait=0, count=None): if self.started and self.group: return QUtilities.fetch_task_group( self.group, failures=failures, wait=wait, count=count, cached=self.cached, )
# local from django_tenants_q.utils import QUtilities from django_q.conf import Conf from django_q.brokers import get_broker class Iter(object): """ An async task with iterable arguments customised for django_tenants_q """ def __init__( self, func=None, args=None, kwargs=None, cached=Conf.CACHED, sync=Conf.SYNC, broker=None, ): self.func = func self.args = args or [] self.kwargs = kwargs or {} self.id = "" self.broker = broker or get_broker() self.cached = cached self.sync = sync self.started = False def append(self, *args): """ add arguments to the set """ self.args.append(args) if self.started: self.started = False return self.length() def run(self): """ Start queueing the tasks to the worker cluster :return: the task id """ self.kwargs["cached"] = self.cached self.kwargs["sync"] = self.sync self.kwargs["broker"] = self.broker self.id = QUtilities.add_async_tasks_from_iter(self.func, self.args, **self.kwargs) self.started = True return self.id def result(self, wait=0): """ return the full list of results. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of results """ if self.started: return QUtilities.get_result(self.id, wait=wait, cached=self.cached) def fetch(self, wait=0): """ get the task result objects. 
:param int wait: how many milliseconds to wait for a result :return: an unsorted list of task objects """ if self.started: return QUtilities.fetch_task(self.id, wait=wait, cached=self.cached) def length(self): """ get the length of the arguments list :return int: length of the argument list """ return len(self.args) class Chain(object): """ A sequential chain of tasks """ def __init__(self, chain=None, group=None, cached=Conf.CACHED, sync=Conf.SYNC): self.chain = chain or [] self.group = group or "" self.broker = get_broker() self.cached = cached self.sync = sync self.started = False def append(self, func, *args, **kwargs): """ add a task to the chain takes the same parameters as async_task() """ self.chain.append((func, args, kwargs)) # remove existing results if self.started: QUtilities.delete_task_group(self.group) self.started = False return self.length() def run(self): """ Start queueing the chain to the worker cluster :return: the chain's group id """ self.group = QUtilities.create_async_tasks_chain( chain=self.chain[:], group=self.group, cached=self.cached, sync=self.sync, broker=self.broker, ) self.started = True return self.group def result(self, wait=0): """ return the full list of results from the chain when it finishes. blocks until timeout. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of results """ if self.started: return QUtilities.get_result_group( self.group, wait=wait, count=self.length(), cached=self.cached ) def fetch(self, failures=True, wait=0): """ get the task result objects from the chain when it finishes. blocks until timeout. 
:param failures: include failed tasks :param int wait: how many milliseconds to wait for a result :return: an unsorted list of task objects """ if self.started: return QUtilities.fetch_task_group( self.group, failures=failures, wait=wait, count=self.length(), cached=self.cached, ) def current(self): """ get the index of the currently executing chain element :return int: current chain index """ if not self.started: return None return QUtilities.get_group_count(self.group, cached=self.cached) def length(self): """ get the length of the chain :return int: length of the chain """ return len(self.chain) class AsyncTask(object): """ an async task """ def __init__(self, func, *args, **kwargs): self.id = "" self.started = False self.func = func self.args = args self.kwargs = kwargs @property def broker(self): return self._get_option("broker", None) @broker.setter def broker(self, value): self._set_option("broker", value) @property def sync(self): return self._get_option("sync", None) @sync.setter def sync(self, value): self._set_option("sync", value) @property def save(self): return self._get_option("save", None) @save.setter def save(self, value): self._set_option("save", value) @property def hook(self): return self._get_option("hook", None) @hook.setter def hook(self, value): self._set_option("hook", value) @property def group(self): return self._get_option("group", None) @group.setter def group(self, value): self._set_option("group", value) @property def cached(self): return self._get_option("cached", Conf.CACHED) @cached.setter def cached(self, value): self._set_option("cached", value) def _set_option(self, key, value): if "q_options" in self.kwargs: self.kwargs["q_options"][key] = value else: self.kwargs[key] = value self.started = False def _get_option(self, key, default=None): if "q_options" in self.kwargs: return self.kwargs["q_options"].get(key, default) else: return self.kwargs.get(key, default) def run(self): self.id = QUtilities.add_async_task(self.func, 
*self.args, **self.kwargs) self.started = True return self.id def result(self, wait=0): if self.started: return QUtilities.get_result(self.id, wait=wait, cached=self.cached) def fetch(self, wait=0): if self.started: return QUtilities.fetch_task(self.id, wait=wait, cached=self.cached) def result_group(self, failures=False, wait=0, count=None): if self.started and self.group: return QUtilities.get_result_group( self.group, failures=failures, wait=wait, count=count, cached=self.cached, ) def fetch_group(self, failures=True, wait=0, count=None): if self.started and self.group: return QUtilities.fetch_task_group( self.group, failures=failures, wait=wait, count=count, cached=self.cached, )
en
0.772941
# local An async task with iterable arguments customised for django_tenants_q add arguments to the set Start queueing the tasks to the worker cluster :return: the task id return the full list of results. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of results get the task result objects. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of task objects get the length of the arguments list :return int: length of the argument list A sequential chain of tasks add a task to the chain takes the same parameters as async_task() # remove existing results Start queueing the chain to the worker cluster :return: the chain's group id return the full list of results from the chain when it finishes. blocks until timeout. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of results get the task result objects from the chain when it finishes. blocks until timeout. :param failures: include failed tasks :param int wait: how many milliseconds to wait for a result :return: an unsorted list of task objects get the index of the currently executing chain element :return int: current chain index get the length of the chain :return int: length of the chain an async task
2.347042
2
tensorflow/lite/experimental/examples/lstm/tflite_lstm.py
khodges42/tensorflow
3
6629779
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TfLite LSTMCell wrapper. TODO(renjieliu): Find a better home for this one. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.lite.python import lite from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.keras import activations from tensorflow.python.keras import initializers from tensorflow.python.layers import base as base_layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops.rnn import _best_effort_input_batch_size from tensorflow.python.ops.rnn import _dynamic_rnn_loop from tensorflow.python.ops.rnn import _should_cache from tensorflow.python.ops.rnn import _transpose_batch_time from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest class 
TFLiteLSTMCell(rnn_cell_impl.LayerRNNCell): """Long short-term memory unit (LSTM) recurrent network cell. This is used only for TfLite, it provides hints and it also makes the variables in the desired for the tflite ops (transposed and seaparated). The default non-peephole implementation is based on: https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf <NAME>, <NAME>, and <NAME>. "Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999. The peephole implementation is based on: https://research.google.com/pubs/archive/43905.pdf <NAME>, <NAME>, and <NAME>. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014. The class uses optional peep-hole connections, optional cell clipping, and an optional projection layer. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for better performance on CPU. """ def __init__(self, num_units, use_peepholes=False, cell_clip=None, initializer=None, num_proj=None, proj_clip=None, num_unit_shards=None, num_proj_shards=None, forget_bias=1.0, state_is_tuple=True, activation=None, reuse=None, name=None, dtype=None): """Initialize the parameters for an LSTM cell. Args: num_units: int, The number of units in the LSTM cell. use_peepholes: bool, set True to enable diagonal/peephole connections. cell_clip: (optional) A float value, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight and projection matrices. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is provided, then the projected values are clipped elementwise to within `[-proj_clip, proj_clip]`. 
num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a variable_scope partitioner instead. num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a variable_scope partitioner instead. forget_bias: Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. Must set it manually to `0.0` when restoring from CudnnLSTM trained checkpoints. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. If False, they are concatenated along the column axis. This latter behavior will soon be deprecated. activation: Activation function of the inner states. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. When restoring from CudnnLSTM-trained checkpoints, use `CudnnCompatibleLSTMCell` instead. """ super(TFLiteLSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype) # TODO(raziel): decide if we want to just support tuples (yes please!). if not state_is_tuple: logging.warn( "%s: Using a concatenated state is slower and will soon be " "deprecated. Use state_is_tuple=True.", self) if num_unit_shards is not None or num_proj_shards is not None: logging.warn( "%s: The num_unit_shards and proj_unit_shards parameters are " "deprecated and will be removed in Jan 2017. " "Use a variable scope with a partitioner instead.", self) # Inputs must be 2-dimensional. # TODO(raziel): layers stuff -- chop if un-layerizing Op. 
self.input_spec = base_layer.InputSpec(ndim=2) self._tflite_wrapper = lite.OpHint("UnidirectionalSequenceLstm") self._num_units = num_units self._use_peepholes = use_peepholes self._cell_clip = cell_clip self._initializer = initializer self._num_proj = num_proj self._proj_clip = proj_clip self._num_unit_shards = num_unit_shards self._num_proj_shards = num_proj_shards self._forget_bias = forget_bias self._state_is_tuple = state_is_tuple self._activation = activation or math_ops.tanh self._output_size = num_proj if num_proj else num_units self._state_size = ( tf.nn.rnn_cell.LSTMStateTuple(num_units, self._output_size) if state_is_tuple else num_units + self._output_size) @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size def build(self, inputs_shape): """Build TfLite LSTM cell graph. Args: inputs_shape: The inputs_shape must be known, and is [batch_size, input_size] shape. Raises: ValueError: if the inputs_shape is invalid. """ if len(inputs_shape) != 2 or inputs_shape[1].value is None: raise ValueError("Invalid inputs_shape, saw shape: %s" % inputs_shape) input_depth = inputs_shape[1].value maybe_partitioner = ( partitioned_variables.fixed_size_partitioner(self._num_unit_shards) if self._num_unit_shards is not None else None) input_weight_shape = [self._num_units, input_depth] cell_weight_shape = [self._num_units, self._output_size] bias_shape = [self._num_units] def add_variable_wrapped(name, shape, initializer, index, partitioner): var = self.add_variable( name, shape=shape, initializer=initializer, partitioner=partitioner) return self._tflite_wrapper.add_input( var, name=name, index_override=index) weight_initializer = self._initializer if self.dtype is None: bias_initializer = init_ops.zeros_initializer else: bias_initializer = init_ops.zeros_initializer(dtype=self.dtype) self.input_to_input_w = add_variable_wrapped( "input_to_input_w", input_weight_shape, weight_initializer, 1, maybe_partitioner) 
self.input_to_forget_w = add_variable_wrapped( "input_to_forget_w", input_weight_shape, weight_initializer, 2, maybe_partitioner) self.input_to_cell_w = add_variable_wrapped( "input_to_cell_w", input_weight_shape, weight_initializer, 3, maybe_partitioner) self.input_to_output_w = add_variable_wrapped( "input_to_output_w", input_weight_shape, weight_initializer, 4, maybe_partitioner) self.cell_to_input_w = add_variable_wrapped( "cell_to_input_w", cell_weight_shape, weight_initializer, 5, maybe_partitioner) self.cell_to_forget_w = add_variable_wrapped( "cell_to_forget_w", cell_weight_shape, weight_initializer, 6, maybe_partitioner) self.cell_to_cell_w = add_variable_wrapped( "cell_to_cell_w", cell_weight_shape, weight_initializer, 7, maybe_partitioner) self.cell_to_output_w = add_variable_wrapped( "cell_to_output_w", cell_weight_shape, weight_initializer, 8, maybe_partitioner) self.input_bias = add_variable_wrapped( "input_bias", bias_shape, bias_initializer, 12, maybe_partitioner) self.forget_bias = add_variable_wrapped( "forget_bias", bias_shape, bias_initializer, 13, maybe_partitioner) self.cell_bias = add_variable_wrapped( "cell_bias", bias_shape, bias_initializer, 14, maybe_partitioner) self.output_bias = add_variable_wrapped( "output_bias", bias_shape, bias_initializer, 15, maybe_partitioner) # index 9, 10, 11. # f stands for forget, i stands for input and o stands for output. if self._use_peepholes: self._w_f_diag = add_variable_wrapped("w_f_diag", [self._num_units], self._initializer, 10, maybe_partitioner) self._w_i_diag = add_variable_wrapped("w_i_diag", [self._num_units], self._initializer, 9, maybe_partitioner) self._w_o_diag = add_variable_wrapped("w_o_diag", [self._num_units], self._initializer, 11, maybe_partitioner) # index 16 for proj kernel. 
if self._num_proj is not None: maybe_proj_partitioner = ( partitioned_variables.fixed_size_partitioner(self._num_proj_shards) if self._num_proj_shards is not None else None) self._proj_kernel = add_variable_wrapped( "projection/kernel", [self._num_proj, self._num_units], self._initializer, 16, partitioner=maybe_proj_partitioner) self.built = True def call(self, inputs, state): """Run one step of LSTM. Args: inputs: input Tensor, 2D, `[batch, num_units]`. state: if `state_is_tuple` is False, this must be a state Tensor, `2-D, [batch, state_size]`. If `state_is_tuple` is True, this must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. Returns: A tuple containing: - A `2-D, [batch, output_dim]`, Tensor representing the output of the LSTM after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - Tensor(s) representing the new state of LSTM after reading `inputs` when the previous state was `state`. Same type and shape(s) as `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. """ inputs = self._tflite_wrapper.add_input( inputs, tag="input", name="input", aggregate="stack", index_override=0) # Make sure inputs and bias_initializer has the same type. assert inputs.dtype == self.input_to_input_w.dtype num_proj = self._num_units if self._num_proj is None else self._num_proj sigmoid = math_ops.sigmoid if self._state_is_tuple: (c_prev, m_prev) = state else: c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units]) m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj]) # Note: For TfLite, cell_state is at index 19 while activation state at # index 18. 
c_prev = self._tflite_wrapper.add_input( c_prev, tag="c_prev", name="c_prev", aggregate="first", index_override=19) m_prev = self._tflite_wrapper.add_input( m_prev, tag="m_prev", name="m_prev", aggregate="first", index_override=18) input_size = inputs.get_shape().with_rank(2)[1] if input_size.value is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") inputs_and_m_prev = array_ops.concat([inputs, m_prev], axis=1) # i stands for input gate. # f stands for forget gate activation. # o outputs. # j output of LSTM unit. # c is the final state. # m is the output. i = nn_ops.bias_add( tf.matmul( inputs_and_m_prev, tf.concat([self.input_to_input_w, self.cell_to_input_w], axis=1), transpose_b=True), self.input_bias) f = nn_ops.bias_add( tf.matmul( inputs_and_m_prev, tf.concat([self.input_to_forget_w, self.cell_to_forget_w], axis=1), transpose_b=True), self.forget_bias) o = nn_ops.bias_add( tf.matmul( inputs_and_m_prev, tf.concat([self.input_to_output_w, self.cell_to_output_w], axis=1), transpose_b=True), self.output_bias) j = nn_ops.bias_add( tf.matmul( inputs_and_m_prev, tf.concat([self.input_to_cell_w, self.cell_to_cell_w], axis=1), transpose_b=True), self.cell_bias) # Diagonal connections if self._use_peepholes: c = ( sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev + sigmoid(i + self._w_i_diag * c_prev) * self._activation(j)) else: c = ( sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * self._activation(j)) if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type if self._use_peepholes: m = sigmoid(o + self._w_o_diag * c) * self._activation(c) else: m = sigmoid(o) * self._activation(c) if self._num_proj is not None: transposed_proj_kernel = tf.transpose(self._proj_kernel) m = math_ops.matmul(m, transposed_proj_kernel) if self._proj_clip is not None: # pylint: 
disable=invalid-unary-operand-type m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip) # pylint: enable=invalid-unary-operand-type c = self._tflite_wrapper.add_output( c, tag="c", name="c", aggregate="last", index_override=1) m = self._tflite_wrapper.add_output( m, tag="m", name="m", index_override=2, aggregate="stack") new_state = ( tf.nn.rnn_cell.LSTMStateTuple(c, m) if self._state_is_tuple else array_ops.concat([c, m], 1)) return m, new_state def get_config(self): config = { "num_units": self._num_units, "use_peepholes": self._use_peepholes, "cell_clip": self._cell_clip, "initializer": initializers.serialize(self._initializer), "num_proj": self._num_proj, "proj_clip": self._proj_clip, "num_unit_shards": self._num_unit_shards, "num_proj_shards": self._num_proj_shards, "forget_bias": self._forget_bias, "state_is_tuple": self._state_is_tuple, "activation": activations.serialize(self._activation), "reuse": self._reuse, } base_config = super(TFLiteLSTMCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=True, scope=None): """Creates a recurrent neural network specified by RNNCell `cell`. Performs fully dynamic unrolling of `inputs`. 
Example: ```python # create a BasicRNNCell rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size) # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size] # defining initial state initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32) # 'state' is a tensor of shape [batch_size, cell_state_size] outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data, initial_state=initial_state, dtype=tf.float32) ``` ```python # create 2 LSTMCells rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]] # create a RNN cell composed sequentially of a number of RNNCells multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers) # 'outputs' is a tensor of shape [batch_size, max_time, 256] # 'state' is a N-tuple where N is the number of LSTMCells containing a # tf.contrib.rnn.LSTMStateTuple for each cell outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell, inputs=data, dtype=tf.float32) ``` Args: cell: An instance of RNNCell. inputs: The RNN inputs. If `time_major == False` (default), this must be a `Tensor` of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If `time_major == True`, this must be a `Tensor` of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. This may also be a (possibly nested) tuple of Tensors satisfying this property. The first two dimensions must match across all the inputs, but otherwise the ranks and other shape components may differ. In this case, input to `cell` at each time-step will replicate the structure of these tuples, except for the time dimension (from which the time is taken). The input to `cell` at each time step will be a `Tensor` or (possibly nested) tuple of Tensors each with dimensions `[batch_size, ...]`. sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. Used to copy-through state and zero-out outputs when past a batch element's sequence length. So it's more for performance than correctness. 
initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. Required if initial_state is not provided or RNN state has a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to "rnn". Returns: A pair (outputs, state) where: outputs: The RNN output `Tensor`. If time_major == False (default), this will be a `Tensor` shaped: `[batch_size, max_time, cell.output_size]`. If time_major == True, this will be a `Tensor` shaped: `[max_time, batch_size, cell.output_size]`. 
Note, if `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `outputs` will be a tuple having the same structure as `cell.output_size`, containing Tensors having shapes corresponding to the shape data in `cell.output_size`. state: The final state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. If cells are `LSTMCells` `state` will be a tuple containing a `LSTMStateTuple` for each cell. Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If inputs is None or an empty list. RuntimeError: If not using control flow v2. """ # Currently only support time_major == True case. assert time_major # TODO(b/123051275): We need to check if the cells are TfLiteLSTMCells or # TfLiteRNNCells. rnn_cell_impl.assert_like_rnncell("cell", cell) if not control_flow_util.ENABLE_CONTROL_FLOW_V2: raise RuntimeError("OpHint dynamic rnn only supports control flow v2.") parent_first_child_input = [{ "parent_ophint_input_index": 0, "first_child_ophint_input_index": 0 }] parent_last_child_output = [{ "parent_output_index": 0, # For LstmCell, the index is 2. # For RnnCell, the index is 1. # So we use -1 meaning it's the last one. "child_output_index": -1 }] internal_children_input_output = [{ "child_input_index": 0, # For LstmCell, the index is 2. # For RnnCell, the index is 1. # So we use -1 meaning it's the last one. 
"child_output_index": -1 }] inputs_outputs_mappings = { "parent_first_child_input": parent_first_child_input, "parent_last_child_output": parent_last_child_output, "internal_children_input_output": internal_children_input_output } tflite_wrapper = lite.OpHint( "TfLiteDynamicRnn", level=2, children_inputs_mappings=inputs_outputs_mappings) with vs.variable_scope(scope or "rnn") as varscope: # Create a new scope in which the caching device is either # determined by the parent scope, or is set to place the cached # Variable using the same placement as for the rest of the RNN. if _should_cache(): if varscope.caching_device is None: varscope.set_caching_device(lambda op: op.device) inputs = tflite_wrapper.add_input(inputs, name="input", index_override=0) # By default, time_major==False and inputs are batch-major: shaped # [batch, time, depth] # For internal calculations, we transpose to [time, batch, depth] flat_input = nest.flatten(inputs) if not time_major: # (batch, time, depth) => (time, batch, depth) flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input] flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input) parallel_iterations = parallel_iterations or 32 if sequence_length is not None: sequence_length = math_ops.to_int32(sequence_length) if sequence_length.get_shape().rank not in (None, 1): raise ValueError( "sequence_length must be a vector of length batch_size, " "but saw shape: %s" % sequence_length.get_shape()) sequence_length = array_ops.identity( # Just to find it in the graph. 
sequence_length, name="sequence_length") batch_size = _best_effort_input_batch_size(flat_input) if initial_state is not None: state = initial_state else: if not dtype: raise ValueError("If there is no initial_state, you must give a dtype.") if getattr(cell, "get_initial_state", None) is not None: state = cell.get_initial_state( inputs=None, batch_size=batch_size, dtype=dtype) else: state = cell.zero_state(batch_size, dtype) def _assert_has_shape(x, shape): x_shape = array_ops.shape(x) packed_shape = array_ops.stack(shape) return control_flow_ops.Assert( math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), [ "Expected shape for Tensor %s is " % x.name, packed_shape, " but saw shape: ", x_shape ]) if not context.executing_eagerly() and sequence_length is not None: # Perform some shape validation with ops.control_dependencies( [_assert_has_shape(sequence_length, [batch_size])]): sequence_length = array_ops.identity( sequence_length, name="CheckSeqLen") inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input) outputs, final_state = _dynamic_rnn_loop( cell, inputs, state, parallel_iterations=parallel_iterations, swap_memory=swap_memory, sequence_length=sequence_length, dtype=dtype) # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth]. # If we are performing batch-major calculations, transpose output back # to shape [batch, time, depth] if not time_major: # (time, batch, depth) => (batch, time, depth) outputs = nest.map_structure(_transpose_batch_time, outputs) outputs = tflite_wrapper.add_output(outputs, name="outputs") return outputs, final_state
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TfLite LSTMCell wrapper. TODO(renjieliu): Find a better home for this one. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.lite.python import lite from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.keras import activations from tensorflow.python.keras import initializers from tensorflow.python.layers import base as base_layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops.rnn import _best_effort_input_batch_size from tensorflow.python.ops.rnn import _dynamic_rnn_loop from tensorflow.python.ops.rnn import _should_cache from tensorflow.python.ops.rnn import _transpose_batch_time from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest class 
TFLiteLSTMCell(rnn_cell_impl.LayerRNNCell): """Long short-term memory unit (LSTM) recurrent network cell. This is used only for TfLite, it provides hints and it also makes the variables in the desired for the tflite ops (transposed and seaparated). The default non-peephole implementation is based on: https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf <NAME>, <NAME>, and <NAME>. "Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999. The peephole implementation is based on: https://research.google.com/pubs/archive/43905.pdf <NAME>, <NAME>, and <NAME>. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014. The class uses optional peep-hole connections, optional cell clipping, and an optional projection layer. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for better performance on CPU. """ def __init__(self, num_units, use_peepholes=False, cell_clip=None, initializer=None, num_proj=None, proj_clip=None, num_unit_shards=None, num_proj_shards=None, forget_bias=1.0, state_is_tuple=True, activation=None, reuse=None, name=None, dtype=None): """Initialize the parameters for an LSTM cell. Args: num_units: int, The number of units in the LSTM cell. use_peepholes: bool, set True to enable diagonal/peephole connections. cell_clip: (optional) A float value, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight and projection matrices. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is provided, then the projected values are clipped elementwise to within `[-proj_clip, proj_clip]`. 
num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a variable_scope partitioner instead. num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a variable_scope partitioner instead. forget_bias: Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. Must set it manually to `0.0` when restoring from CudnnLSTM trained checkpoints. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. If False, they are concatenated along the column axis. This latter behavior will soon be deprecated. activation: Activation function of the inner states. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. When restoring from CudnnLSTM-trained checkpoints, use `CudnnCompatibleLSTMCell` instead. """ super(TFLiteLSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype) # TODO(raziel): decide if we want to just support tuples (yes please!). if not state_is_tuple: logging.warn( "%s: Using a concatenated state is slower and will soon be " "deprecated. Use state_is_tuple=True.", self) if num_unit_shards is not None or num_proj_shards is not None: logging.warn( "%s: The num_unit_shards and proj_unit_shards parameters are " "deprecated and will be removed in Jan 2017. " "Use a variable scope with a partitioner instead.", self) # Inputs must be 2-dimensional. # TODO(raziel): layers stuff -- chop if un-layerizing Op. 
self.input_spec = base_layer.InputSpec(ndim=2) self._tflite_wrapper = lite.OpHint("UnidirectionalSequenceLstm") self._num_units = num_units self._use_peepholes = use_peepholes self._cell_clip = cell_clip self._initializer = initializer self._num_proj = num_proj self._proj_clip = proj_clip self._num_unit_shards = num_unit_shards self._num_proj_shards = num_proj_shards self._forget_bias = forget_bias self._state_is_tuple = state_is_tuple self._activation = activation or math_ops.tanh self._output_size = num_proj if num_proj else num_units self._state_size = ( tf.nn.rnn_cell.LSTMStateTuple(num_units, self._output_size) if state_is_tuple else num_units + self._output_size) @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size def build(self, inputs_shape): """Build TfLite LSTM cell graph. Args: inputs_shape: The inputs_shape must be known, and is [batch_size, input_size] shape. Raises: ValueError: if the inputs_shape is invalid. """ if len(inputs_shape) != 2 or inputs_shape[1].value is None: raise ValueError("Invalid inputs_shape, saw shape: %s" % inputs_shape) input_depth = inputs_shape[1].value maybe_partitioner = ( partitioned_variables.fixed_size_partitioner(self._num_unit_shards) if self._num_unit_shards is not None else None) input_weight_shape = [self._num_units, input_depth] cell_weight_shape = [self._num_units, self._output_size] bias_shape = [self._num_units] def add_variable_wrapped(name, shape, initializer, index, partitioner): var = self.add_variable( name, shape=shape, initializer=initializer, partitioner=partitioner) return self._tflite_wrapper.add_input( var, name=name, index_override=index) weight_initializer = self._initializer if self.dtype is None: bias_initializer = init_ops.zeros_initializer else: bias_initializer = init_ops.zeros_initializer(dtype=self.dtype) self.input_to_input_w = add_variable_wrapped( "input_to_input_w", input_weight_shape, weight_initializer, 1, maybe_partitioner) 
self.input_to_forget_w = add_variable_wrapped( "input_to_forget_w", input_weight_shape, weight_initializer, 2, maybe_partitioner) self.input_to_cell_w = add_variable_wrapped( "input_to_cell_w", input_weight_shape, weight_initializer, 3, maybe_partitioner) self.input_to_output_w = add_variable_wrapped( "input_to_output_w", input_weight_shape, weight_initializer, 4, maybe_partitioner) self.cell_to_input_w = add_variable_wrapped( "cell_to_input_w", cell_weight_shape, weight_initializer, 5, maybe_partitioner) self.cell_to_forget_w = add_variable_wrapped( "cell_to_forget_w", cell_weight_shape, weight_initializer, 6, maybe_partitioner) self.cell_to_cell_w = add_variable_wrapped( "cell_to_cell_w", cell_weight_shape, weight_initializer, 7, maybe_partitioner) self.cell_to_output_w = add_variable_wrapped( "cell_to_output_w", cell_weight_shape, weight_initializer, 8, maybe_partitioner) self.input_bias = add_variable_wrapped( "input_bias", bias_shape, bias_initializer, 12, maybe_partitioner) self.forget_bias = add_variable_wrapped( "forget_bias", bias_shape, bias_initializer, 13, maybe_partitioner) self.cell_bias = add_variable_wrapped( "cell_bias", bias_shape, bias_initializer, 14, maybe_partitioner) self.output_bias = add_variable_wrapped( "output_bias", bias_shape, bias_initializer, 15, maybe_partitioner) # index 9, 10, 11. # f stands for forget, i stands for input and o stands for output. if self._use_peepholes: self._w_f_diag = add_variable_wrapped("w_f_diag", [self._num_units], self._initializer, 10, maybe_partitioner) self._w_i_diag = add_variable_wrapped("w_i_diag", [self._num_units], self._initializer, 9, maybe_partitioner) self._w_o_diag = add_variable_wrapped("w_o_diag", [self._num_units], self._initializer, 11, maybe_partitioner) # index 16 for proj kernel. 
if self._num_proj is not None: maybe_proj_partitioner = ( partitioned_variables.fixed_size_partitioner(self._num_proj_shards) if self._num_proj_shards is not None else None) self._proj_kernel = add_variable_wrapped( "projection/kernel", [self._num_proj, self._num_units], self._initializer, 16, partitioner=maybe_proj_partitioner) self.built = True def call(self, inputs, state): """Run one step of LSTM. Args: inputs: input Tensor, 2D, `[batch, num_units]`. state: if `state_is_tuple` is False, this must be a state Tensor, `2-D, [batch, state_size]`. If `state_is_tuple` is True, this must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. Returns: A tuple containing: - A `2-D, [batch, output_dim]`, Tensor representing the output of the LSTM after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - Tensor(s) representing the new state of LSTM after reading `inputs` when the previous state was `state`. Same type and shape(s) as `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. """ inputs = self._tflite_wrapper.add_input( inputs, tag="input", name="input", aggregate="stack", index_override=0) # Make sure inputs and bias_initializer has the same type. assert inputs.dtype == self.input_to_input_w.dtype num_proj = self._num_units if self._num_proj is None else self._num_proj sigmoid = math_ops.sigmoid if self._state_is_tuple: (c_prev, m_prev) = state else: c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units]) m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj]) # Note: For TfLite, cell_state is at index 19 while activation state at # index 18. 
c_prev = self._tflite_wrapper.add_input( c_prev, tag="c_prev", name="c_prev", aggregate="first", index_override=19) m_prev = self._tflite_wrapper.add_input( m_prev, tag="m_prev", name="m_prev", aggregate="first", index_override=18) input_size = inputs.get_shape().with_rank(2)[1] if input_size.value is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") inputs_and_m_prev = array_ops.concat([inputs, m_prev], axis=1) # i stands for input gate. # f stands for forget gate activation. # o outputs. # j output of LSTM unit. # c is the final state. # m is the output. i = nn_ops.bias_add( tf.matmul( inputs_and_m_prev, tf.concat([self.input_to_input_w, self.cell_to_input_w], axis=1), transpose_b=True), self.input_bias) f = nn_ops.bias_add( tf.matmul( inputs_and_m_prev, tf.concat([self.input_to_forget_w, self.cell_to_forget_w], axis=1), transpose_b=True), self.forget_bias) o = nn_ops.bias_add( tf.matmul( inputs_and_m_prev, tf.concat([self.input_to_output_w, self.cell_to_output_w], axis=1), transpose_b=True), self.output_bias) j = nn_ops.bias_add( tf.matmul( inputs_and_m_prev, tf.concat([self.input_to_cell_w, self.cell_to_cell_w], axis=1), transpose_b=True), self.cell_bias) # Diagonal connections if self._use_peepholes: c = ( sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev + sigmoid(i + self._w_i_diag * c_prev) * self._activation(j)) else: c = ( sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * self._activation(j)) if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type if self._use_peepholes: m = sigmoid(o + self._w_o_diag * c) * self._activation(c) else: m = sigmoid(o) * self._activation(c) if self._num_proj is not None: transposed_proj_kernel = tf.transpose(self._proj_kernel) m = math_ops.matmul(m, transposed_proj_kernel) if self._proj_clip is not None: # pylint: 
disable=invalid-unary-operand-type m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip) # pylint: enable=invalid-unary-operand-type c = self._tflite_wrapper.add_output( c, tag="c", name="c", aggregate="last", index_override=1) m = self._tflite_wrapper.add_output( m, tag="m", name="m", index_override=2, aggregate="stack") new_state = ( tf.nn.rnn_cell.LSTMStateTuple(c, m) if self._state_is_tuple else array_ops.concat([c, m], 1)) return m, new_state def get_config(self): config = { "num_units": self._num_units, "use_peepholes": self._use_peepholes, "cell_clip": self._cell_clip, "initializer": initializers.serialize(self._initializer), "num_proj": self._num_proj, "proj_clip": self._proj_clip, "num_unit_shards": self._num_unit_shards, "num_proj_shards": self._num_proj_shards, "forget_bias": self._forget_bias, "state_is_tuple": self._state_is_tuple, "activation": activations.serialize(self._activation), "reuse": self._reuse, } base_config = super(TFLiteLSTMCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=True, scope=None): """Creates a recurrent neural network specified by RNNCell `cell`. Performs fully dynamic unrolling of `inputs`. 
Example: ```python # create a BasicRNNCell rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size) # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size] # defining initial state initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32) # 'state' is a tensor of shape [batch_size, cell_state_size] outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data, initial_state=initial_state, dtype=tf.float32) ``` ```python # create 2 LSTMCells rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]] # create a RNN cell composed sequentially of a number of RNNCells multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers) # 'outputs' is a tensor of shape [batch_size, max_time, 256] # 'state' is a N-tuple where N is the number of LSTMCells containing a # tf.contrib.rnn.LSTMStateTuple for each cell outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell, inputs=data, dtype=tf.float32) ``` Args: cell: An instance of RNNCell. inputs: The RNN inputs. If `time_major == False` (default), this must be a `Tensor` of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If `time_major == True`, this must be a `Tensor` of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. This may also be a (possibly nested) tuple of Tensors satisfying this property. The first two dimensions must match across all the inputs, but otherwise the ranks and other shape components may differ. In this case, input to `cell` at each time-step will replicate the structure of these tuples, except for the time dimension (from which the time is taken). The input to `cell` at each time step will be a `Tensor` or (possibly nested) tuple of Tensors each with dimensions `[batch_size, ...]`. sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. Used to copy-through state and zero-out outputs when past a batch element's sequence length. So it's more for performance than correctness. 
initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. Required if initial_state is not provided or RNN state has a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to "rnn". Returns: A pair (outputs, state) where: outputs: The RNN output `Tensor`. If time_major == False (default), this will be a `Tensor` shaped: `[batch_size, max_time, cell.output_size]`. If time_major == True, this will be a `Tensor` shaped: `[max_time, batch_size, cell.output_size]`. 
Note, if `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `outputs` will be a tuple having the same structure as `cell.output_size`, containing Tensors having shapes corresponding to the shape data in `cell.output_size`. state: The final state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. If cells are `LSTMCells` `state` will be a tuple containing a `LSTMStateTuple` for each cell. Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If inputs is None or an empty list. RuntimeError: If not using control flow v2. """ # Currently only support time_major == True case. assert time_major # TODO(b/123051275): We need to check if the cells are TfLiteLSTMCells or # TfLiteRNNCells. rnn_cell_impl.assert_like_rnncell("cell", cell) if not control_flow_util.ENABLE_CONTROL_FLOW_V2: raise RuntimeError("OpHint dynamic rnn only supports control flow v2.") parent_first_child_input = [{ "parent_ophint_input_index": 0, "first_child_ophint_input_index": 0 }] parent_last_child_output = [{ "parent_output_index": 0, # For LstmCell, the index is 2. # For RnnCell, the index is 1. # So we use -1 meaning it's the last one. "child_output_index": -1 }] internal_children_input_output = [{ "child_input_index": 0, # For LstmCell, the index is 2. # For RnnCell, the index is 1. # So we use -1 meaning it's the last one. 
"child_output_index": -1 }] inputs_outputs_mappings = { "parent_first_child_input": parent_first_child_input, "parent_last_child_output": parent_last_child_output, "internal_children_input_output": internal_children_input_output } tflite_wrapper = lite.OpHint( "TfLiteDynamicRnn", level=2, children_inputs_mappings=inputs_outputs_mappings) with vs.variable_scope(scope or "rnn") as varscope: # Create a new scope in which the caching device is either # determined by the parent scope, or is set to place the cached # Variable using the same placement as for the rest of the RNN. if _should_cache(): if varscope.caching_device is None: varscope.set_caching_device(lambda op: op.device) inputs = tflite_wrapper.add_input(inputs, name="input", index_override=0) # By default, time_major==False and inputs are batch-major: shaped # [batch, time, depth] # For internal calculations, we transpose to [time, batch, depth] flat_input = nest.flatten(inputs) if not time_major: # (batch, time, depth) => (time, batch, depth) flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input] flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input) parallel_iterations = parallel_iterations or 32 if sequence_length is not None: sequence_length = math_ops.to_int32(sequence_length) if sequence_length.get_shape().rank not in (None, 1): raise ValueError( "sequence_length must be a vector of length batch_size, " "but saw shape: %s" % sequence_length.get_shape()) sequence_length = array_ops.identity( # Just to find it in the graph. 
sequence_length, name="sequence_length") batch_size = _best_effort_input_batch_size(flat_input) if initial_state is not None: state = initial_state else: if not dtype: raise ValueError("If there is no initial_state, you must give a dtype.") if getattr(cell, "get_initial_state", None) is not None: state = cell.get_initial_state( inputs=None, batch_size=batch_size, dtype=dtype) else: state = cell.zero_state(batch_size, dtype) def _assert_has_shape(x, shape): x_shape = array_ops.shape(x) packed_shape = array_ops.stack(shape) return control_flow_ops.Assert( math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), [ "Expected shape for Tensor %s is " % x.name, packed_shape, " but saw shape: ", x_shape ]) if not context.executing_eagerly() and sequence_length is not None: # Perform some shape validation with ops.control_dependencies( [_assert_has_shape(sequence_length, [batch_size])]): sequence_length = array_ops.identity( sequence_length, name="CheckSeqLen") inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input) outputs, final_state = _dynamic_rnn_loop( cell, inputs, state, parallel_iterations=parallel_iterations, swap_memory=swap_memory, sequence_length=sequence_length, dtype=dtype) # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth]. # If we are performing batch-major calculations, transpose output back # to shape [batch, time, depth] if not time_major: # (time, batch, depth) => (batch, time, depth) outputs = nest.map_structure(_transpose_batch_time, outputs) outputs = tflite_wrapper.add_output(outputs, name="outputs") return outputs, final_state
en
0.79447
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== TfLite LSTMCell wrapper. TODO(renjieliu): Find a better home for this one. Long short-term memory unit (LSTM) recurrent network cell. This is used only for TfLite, it provides hints and it also makes the variables in the desired for the tflite ops (transposed and seaparated). The default non-peephole implementation is based on: https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf <NAME>, <NAME>, and <NAME>. "Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999. The peephole implementation is based on: https://research.google.com/pubs/archive/43905.pdf <NAME>, <NAME>, and <NAME>. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014. The class uses optional peep-hole connections, optional cell clipping, and an optional projection layer. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for better performance on CPU. Initialize the parameters for an LSTM cell. Args: num_units: int, The number of units in the LSTM cell. use_peepholes: bool, set True to enable diagonal/peephole connections. 
cell_clip: (optional) A float value, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight and projection matrices. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is provided, then the projected values are clipped elementwise to within `[-proj_clip, proj_clip]`. num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a variable_scope partitioner instead. num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a variable_scope partitioner instead. forget_bias: Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. Must set it manually to `0.0` when restoring from CudnnLSTM trained checkpoints. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. If False, they are concatenated along the column axis. This latter behavior will soon be deprecated. activation: Activation function of the inner states. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. When restoring from CudnnLSTM-trained checkpoints, use `CudnnCompatibleLSTMCell` instead. # TODO(raziel): decide if we want to just support tuples (yes please!). # Inputs must be 2-dimensional. # TODO(raziel): layers stuff -- chop if un-layerizing Op. Build TfLite LSTM cell graph. 
Args: inputs_shape: The inputs_shape must be known, and is [batch_size, input_size] shape. Raises: ValueError: if the inputs_shape is invalid. # index 9, 10, 11. # f stands for forget, i stands for input and o stands for output. # index 16 for proj kernel. Run one step of LSTM. Args: inputs: input Tensor, 2D, `[batch, num_units]`. state: if `state_is_tuple` is False, this must be a state Tensor, `2-D, [batch, state_size]`. If `state_is_tuple` is True, this must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. Returns: A tuple containing: - A `2-D, [batch, output_dim]`, Tensor representing the output of the LSTM after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - Tensor(s) representing the new state of LSTM after reading `inputs` when the previous state was `state`. Same type and shape(s) as `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. # Make sure inputs and bias_initializer has the same type. # Note: For TfLite, cell_state is at index 19 while activation state at # index 18. # i stands for input gate. # f stands for forget gate activation. # o outputs. # j output of LSTM unit. # c is the final state. # m is the output. # Diagonal connections # pylint: disable=invalid-unary-operand-type # pylint: enable=invalid-unary-operand-type # pylint: disable=invalid-unary-operand-type # pylint: enable=invalid-unary-operand-type Creates a recurrent neural network specified by RNNCell `cell`. Performs fully dynamic unrolling of `inputs`. 
Example: ```python # create a BasicRNNCell rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size) # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size] # defining initial state initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32) # 'state' is a tensor of shape [batch_size, cell_state_size] outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data, initial_state=initial_state, dtype=tf.float32) ``` ```python # create 2 LSTMCells rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]] # create a RNN cell composed sequentially of a number of RNNCells multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers) # 'outputs' is a tensor of shape [batch_size, max_time, 256] # 'state' is a N-tuple where N is the number of LSTMCells containing a # tf.contrib.rnn.LSTMStateTuple for each cell outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell, inputs=data, dtype=tf.float32) ``` Args: cell: An instance of RNNCell. inputs: The RNN inputs. If `time_major == False` (default), this must be a `Tensor` of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If `time_major == True`, this must be a `Tensor` of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. This may also be a (possibly nested) tuple of Tensors satisfying this property. The first two dimensions must match across all the inputs, but otherwise the ranks and other shape components may differ. In this case, input to `cell` at each time-step will replicate the structure of these tuples, except for the time dimension (from which the time is taken). The input to `cell` at each time step will be a `Tensor` or (possibly nested) tuple of Tensors each with dimensions `[batch_size, ...]`. sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. Used to copy-through state and zero-out outputs when past a batch element's sequence length. So it's more for performance than correctness. 
initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. Required if initial_state is not provided or RNN state has a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to "rnn". Returns: A pair (outputs, state) where: outputs: The RNN output `Tensor`. If time_major == False (default), this will be a `Tensor` shaped: `[batch_size, max_time, cell.output_size]`. If time_major == True, this will be a `Tensor` shaped: `[max_time, batch_size, cell.output_size]`. 
Note, if `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `outputs` will be a tuple having the same structure as `cell.output_size`, containing Tensors having shapes corresponding to the shape data in `cell.output_size`. state: The final state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. If cells are `LSTMCells` `state` will be a tuple containing a `LSTMStateTuple` for each cell. Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If inputs is None or an empty list. RuntimeError: If not using control flow v2. # Currently only support time_major == True case. # TODO(b/123051275): We need to check if the cells are TfLiteLSTMCells or # TfLiteRNNCells. # For LstmCell, the index is 2. # For RnnCell, the index is 1. # So we use -1 meaning it's the last one. # For LstmCell, the index is 2. # For RnnCell, the index is 1. # So we use -1 meaning it's the last one. # Create a new scope in which the caching device is either # determined by the parent scope, or is set to place the cached # Variable using the same placement as for the rest of the RNN. # By default, time_major==False and inputs are batch-major: shaped # [batch, time, depth] # For internal calculations, we transpose to [time, batch, depth] # (batch, time, depth) => (time, batch, depth) # Just to find it in the graph. # Perform some shape validation # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth]. # If we are performing batch-major calculations, transpose output back # to shape [batch, time, depth] # (time, batch, depth) => (batch, time, depth)
1.650672
2
mg/model/MusicTransformer/train.py
SJTMusicTeam/MusicGeneration
6
6629780
# import sys # sys.path.append('/data2/qt/MusicGeneration/mg/model/Musictransformer') from network import MusicTransformer from metrics import * from criterion import SmoothCrossEntropyLoss, CustomSchedule import config from data import Data import utils import datetime import time import optparse import torch import torch.optim as optim # from tensorboardX import SummaryWriter def get_options(): parser = optparse.OptionParser() parser.add_option('-s', '--save_path', dest='save_path', type='string', default='/data2/qt/MusicGeneration/mg/model/MusicTransformer/save_model/') parser.add_option('-d', '--dataset', dest='data_path', type='string', default='/data2/qt/MusicTransformer-pytorch/dataset/processed/') parser.add_option('-e', '--epochs', dest='epochs', type='int', default=20000) parser.add_option('-i', '--saving-interval', dest='saving_interval', type='int', default=50) parser.add_option('-b', '--batch-size', dest='batch_size', type='int', default=config.train['batch_size']) parser.add_option('-l', '--learning-rate', dest='learning_rate', type='float', default=config.train['learning_rate']) parser.add_option('-w', '--window-size', dest='window_size', type='int', default=config.train['window_size']) parser.add_option('-S', '--stride-size', dest='stride_size', type='int', default=config.train['stride_size']) parser.add_option('-g', '--multi_gpu', dest='multi_gpu', type='string', default='False') parser.add_option('-m', '--load_path', dest='load_path', type='string', default=None) parser.add_option('-M', '--max_seq', dest='max_seq', type='int', default=2048) return parser.parse_args()[0] options = get_options() # ------------------------------------------------------------------------ saving_interval = options.saving_interval window_size = options.window_size stride_size = options.stride_size data_path = options.data_path l_r = options.learning_rate batch_size = options.batch_size pickle_dir = options.data_path max_seq = options.max_seq epochs = options.epochs 
load_path = options.load_path save_path = options.save_path if options.multi_gpu == 'True' : multi_gpu = True else: multi_gpu = False event_dim = config.event_dim #EventSeq.dim() model_config = config.model device = config.device limlen = config.window_size print('-' * 70) print('Save path:', save_path) print('Dataset path:', data_path) print('Saving interval:', saving_interval) print('-' * 70) print('Hyperparameters:', utils.dict2params(model_config)) print('Learning rate:', l_r) print('Batch size:', batch_size) print('Window size:', window_size) print('Stride size:', stride_size) print('Device:', device) print('-' * 70) # ======================================================================== # Load model and dataset # ======================================================================== # check cuda if torch.cuda.is_available(): config.device = torch.device('cuda') else: config.device = torch.device('cpu') # init metric set metric_set = MetricsSet({ 'accuracy': CategoricalAccuracy(), 'loss': SmoothCrossEntropyLoss(config.label_smooth, config.vocab_size, config.pad_token), 'bucket': LogitsBucketting(config.vocab_size) }) start_epoch = 0 def load_model(): global model_config, device, start_epoch model = MusicTransformer(**model_config) model.to(device) opt = optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9) scheduler = CustomSchedule(config.embedding_dim, optimizer=opt) if load_path is not None: checkpoint = torch.load(load_path) model.load_state_dict(checkpoint['net']) model.to(device) opt.load_state_dict(checkpoint['optimizer']) scheduler = CustomSchedule(config.embedding_dim, optimizer=opt) start_epoch = checkpoint['epoch'] + 1 # model.load_state_dict(torch.load(load_path)) print(f'Success load {load_path}') model.eval() eval_x, eval_y = dataset.slide_seq2seq_batch(2, config.max_seq, 'valid') eval_x = torch.from_numpy(eval_x).contiguous().to(config.device, dtype=torch.int) eval_y = torch.from_numpy(eval_y).contiguous().to(config.device, 
dtype=torch.int) eval_preiction, _ = model.forward(eval_x) eval_metrics = metric_set(eval_preiction, eval_y) print('Eval >>>> Loss: {:6.6}, Accuracy: {}'.format(eval_metrics['loss'], eval_metrics['accuracy'])) return model, scheduler """ def load_dataset(limlen): global data_path dataset = Event_Dataset(data_path, limlen, verbose=True) dataset_size = len(dataset.samples) assert dataset_size > 0 return dataset """ # load data print(pickle_dir) dataset = Data(pickle_dir, max_seq) print(dataset) print('Loading model') mt, scheduler = load_model() # print(mt) # print('-' * 70) # # print('Loading dataset') # # print(os.path.isdir(data_path)) # dataset = load_dataset(limlen) # print(dataset) print('-' * 70) # ------------------------------------------------------------------------ def save_model(epoch, acc = 0.0): global save_path, mt, scheduler state = {'net':mt.state_dict(), 'optimizer': scheduler.optimizer.state_dict(), 'epoch': epoch} #torch.save(single_mt.state_dict(), args.model_dir+'/train-{}-{}.pth'.format(e, eval_metrics['accuracy'])) print('Saving to', save_path+'train-{}-{}.pth'.format(epoch, acc)) torch.save(state, save_path+'train-{}-{}.pth'.format(epoch, acc)) # torch.save({'model_config': model_config, # 'model_state': model.state_dict(), # 'model_optimizer_state': optimizer.state_dict()}, save_path) print('Done saving') # ======================================================================== # Training # ======================================================================== # load model learning_rate = l_r # # define model # mt = MusicTransformer( # embedding_dim=config.embedding_dim, # vocab_size=config.vocab_size, # num_layer=config.num_layers, # max_seq=config.max_seq, # dropout=config.dropout, # debug=config.debug, loader_path=config.load_path # ) # mt.to(config.device) # multi-GPU set # single_mt = mt # if torch.cuda.device_count() > 1 and multi_gpu: # mt = torch.nn.DataParallel(mt, output_device=torch.cuda.device_count()-1) print(mt) print('| 
Summary - Device Info : {}'.format(torch.cuda.device)) # define tensorboard writer current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S') # train_log_dir = 'logs/'+config.experiment+'/'+current_time+'/train' # eval_log_dir = 'logs/'+config.experiment+'/'+current_time+'/eval' # train_summary_writer = SummaryWriter(train_log_dir) # eval_summary_writer = SummaryWriter(eval_log_dir) # Train Start scheduler.optimizer.zero_grad() print(">> Train start...") idx = 0 for e in range(start_epoch, config.epochs): try: print(">>> [Epoch was updated]") for b in range(len(dataset.file_dict['train']) // config.batch_size): try: batch_x, batch_y = dataset.slide_seq2seq_batch(config.batch_size, config.max_seq) batch_x = torch.from_numpy(batch_x).contiguous().to(config.device, non_blocking=True, dtype=torch.int) batch_y = torch.from_numpy(batch_y).contiguous().to(config.device, non_blocking=True, dtype=torch.int) except IndexError: continue start_time = time.time() mt.train() sample = mt.forward(batch_x) metrics = metric_set(sample, batch_y) loss = metrics['loss'] / config.accum_grad loss.backward() if (b+1) % config.accum_grad == 0: scheduler.step() # train_summary_writer.add_scalar('loss', metrics['loss'], global_step=idx) # train_summary_writer.add_scalar('accuracy', metrics['accuracy'], global_step=idx) # train_summary_writer.add_scalar('learning_rate', scheduler.rate(), global_step=idx) # train_summary_writer.add_scalar('iter_p_sec', end_time-start_time, global_step=idx) scheduler.optimizer.zero_grad() end_time = time.time() # if config.debug: # print("[Loss]: {}".format(loss)) torch.cuda.empty_cache() idx += 1 # switch output device to: gpu-1 ~ gpu-n sw_start = time.time() if torch.cuda.device_count() > 1: mt.output_device = idx % (torch.cuda.device_count() -1) + 1 sw_end = time.time() # if config.debug: # print('output switch time: {}'.format(sw_end - sw_start) ) # result_metrics = metric_set(sample, batch_y) mt.eval() eval_x, eval_y = dataset.slide_seq2seq_batch(2, 
config.max_seq, 'valid') eval_x = torch.from_numpy(eval_x).contiguous().to(config.device, dtype=torch.int) eval_y = torch.from_numpy(eval_y).contiguous().to(config.device, dtype=torch.int) eval_preiction, weights = mt.forward(eval_x) eval_metrics = metric_set(eval_preiction, eval_y) if (e+1) % 50 == 0: save_model(e, eval_metrics['accuracy']) # if b == 0: # # train_summary_writer.add_histogram("target_analysis", batch_y, global_step=e) # # train_summary_writer.add_histogram("source_analysis", batch_x, global_step=e) # for i, weight in enumerate(weights): # attn_log_name = "attn/layer-{}".format(i) # # utils.attention_image_summary( # # attn_log_name, weight, step=idx, writer=eval_summary_writer) # eval_summary_writer.add_scalar('loss', eval_metrics['loss'], global_step=idx) # eval_summary_writer.add_scalar('accuracy', eval_metrics['accuracy'], global_step=idx) # eval_summary_writer.add_histogram("logits_bucket", eval_metrics['bucket'], global_step=idx) print('\n====================================================') print('Epoch/Batch: {}/{}'.format(e, b)) print('Train >>>> Loss: {:6.6}, Accuracy: {}'.format(metrics['loss'], metrics['accuracy'])) print('Eval >>>> Loss: {:6.6}, Accuracy: {}'.format(eval_metrics['loss'], eval_metrics['accuracy'])) except KeyboardInterrupt: save_model(e) print(Exception) break save_model(epochs) # eval_summary_writer.close() # train_summary_writer.close()
# import sys # sys.path.append('/data2/qt/MusicGeneration/mg/model/Musictransformer') from network import MusicTransformer from metrics import * from criterion import SmoothCrossEntropyLoss, CustomSchedule import config from data import Data import utils import datetime import time import optparse import torch import torch.optim as optim # from tensorboardX import SummaryWriter def get_options(): parser = optparse.OptionParser() parser.add_option('-s', '--save_path', dest='save_path', type='string', default='/data2/qt/MusicGeneration/mg/model/MusicTransformer/save_model/') parser.add_option('-d', '--dataset', dest='data_path', type='string', default='/data2/qt/MusicTransformer-pytorch/dataset/processed/') parser.add_option('-e', '--epochs', dest='epochs', type='int', default=20000) parser.add_option('-i', '--saving-interval', dest='saving_interval', type='int', default=50) parser.add_option('-b', '--batch-size', dest='batch_size', type='int', default=config.train['batch_size']) parser.add_option('-l', '--learning-rate', dest='learning_rate', type='float', default=config.train['learning_rate']) parser.add_option('-w', '--window-size', dest='window_size', type='int', default=config.train['window_size']) parser.add_option('-S', '--stride-size', dest='stride_size', type='int', default=config.train['stride_size']) parser.add_option('-g', '--multi_gpu', dest='multi_gpu', type='string', default='False') parser.add_option('-m', '--load_path', dest='load_path', type='string', default=None) parser.add_option('-M', '--max_seq', dest='max_seq', type='int', default=2048) return parser.parse_args()[0] options = get_options() # ------------------------------------------------------------------------ saving_interval = options.saving_interval window_size = options.window_size stride_size = options.stride_size data_path = options.data_path l_r = options.learning_rate batch_size = options.batch_size pickle_dir = options.data_path max_seq = options.max_seq epochs = options.epochs 
load_path = options.load_path save_path = options.save_path if options.multi_gpu == 'True' : multi_gpu = True else: multi_gpu = False event_dim = config.event_dim #EventSeq.dim() model_config = config.model device = config.device limlen = config.window_size print('-' * 70) print('Save path:', save_path) print('Dataset path:', data_path) print('Saving interval:', saving_interval) print('-' * 70) print('Hyperparameters:', utils.dict2params(model_config)) print('Learning rate:', l_r) print('Batch size:', batch_size) print('Window size:', window_size) print('Stride size:', stride_size) print('Device:', device) print('-' * 70) # ======================================================================== # Load model and dataset # ======================================================================== # check cuda if torch.cuda.is_available(): config.device = torch.device('cuda') else: config.device = torch.device('cpu') # init metric set metric_set = MetricsSet({ 'accuracy': CategoricalAccuracy(), 'loss': SmoothCrossEntropyLoss(config.label_smooth, config.vocab_size, config.pad_token), 'bucket': LogitsBucketting(config.vocab_size) }) start_epoch = 0 def load_model(): global model_config, device, start_epoch model = MusicTransformer(**model_config) model.to(device) opt = optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9) scheduler = CustomSchedule(config.embedding_dim, optimizer=opt) if load_path is not None: checkpoint = torch.load(load_path) model.load_state_dict(checkpoint['net']) model.to(device) opt.load_state_dict(checkpoint['optimizer']) scheduler = CustomSchedule(config.embedding_dim, optimizer=opt) start_epoch = checkpoint['epoch'] + 1 # model.load_state_dict(torch.load(load_path)) print(f'Success load {load_path}') model.eval() eval_x, eval_y = dataset.slide_seq2seq_batch(2, config.max_seq, 'valid') eval_x = torch.from_numpy(eval_x).contiguous().to(config.device, dtype=torch.int) eval_y = torch.from_numpy(eval_y).contiguous().to(config.device, 
dtype=torch.int) eval_preiction, _ = model.forward(eval_x) eval_metrics = metric_set(eval_preiction, eval_y) print('Eval >>>> Loss: {:6.6}, Accuracy: {}'.format(eval_metrics['loss'], eval_metrics['accuracy'])) return model, scheduler """ def load_dataset(limlen): global data_path dataset = Event_Dataset(data_path, limlen, verbose=True) dataset_size = len(dataset.samples) assert dataset_size > 0 return dataset """ # load data print(pickle_dir) dataset = Data(pickle_dir, max_seq) print(dataset) print('Loading model') mt, scheduler = load_model() # print(mt) # print('-' * 70) # # print('Loading dataset') # # print(os.path.isdir(data_path)) # dataset = load_dataset(limlen) # print(dataset) print('-' * 70) # ------------------------------------------------------------------------ def save_model(epoch, acc = 0.0): global save_path, mt, scheduler state = {'net':mt.state_dict(), 'optimizer': scheduler.optimizer.state_dict(), 'epoch': epoch} #torch.save(single_mt.state_dict(), args.model_dir+'/train-{}-{}.pth'.format(e, eval_metrics['accuracy'])) print('Saving to', save_path+'train-{}-{}.pth'.format(epoch, acc)) torch.save(state, save_path+'train-{}-{}.pth'.format(epoch, acc)) # torch.save({'model_config': model_config, # 'model_state': model.state_dict(), # 'model_optimizer_state': optimizer.state_dict()}, save_path) print('Done saving') # ======================================================================== # Training # ======================================================================== # load model learning_rate = l_r # # define model # mt = MusicTransformer( # embedding_dim=config.embedding_dim, # vocab_size=config.vocab_size, # num_layer=config.num_layers, # max_seq=config.max_seq, # dropout=config.dropout, # debug=config.debug, loader_path=config.load_path # ) # mt.to(config.device) # multi-GPU set # single_mt = mt # if torch.cuda.device_count() > 1 and multi_gpu: # mt = torch.nn.DataParallel(mt, output_device=torch.cuda.device_count()-1) print(mt) print('| 
Summary - Device Info : {}'.format(torch.cuda.device)) # define tensorboard writer current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S') # train_log_dir = 'logs/'+config.experiment+'/'+current_time+'/train' # eval_log_dir = 'logs/'+config.experiment+'/'+current_time+'/eval' # train_summary_writer = SummaryWriter(train_log_dir) # eval_summary_writer = SummaryWriter(eval_log_dir) # Train Start scheduler.optimizer.zero_grad() print(">> Train start...") idx = 0 for e in range(start_epoch, config.epochs): try: print(">>> [Epoch was updated]") for b in range(len(dataset.file_dict['train']) // config.batch_size): try: batch_x, batch_y = dataset.slide_seq2seq_batch(config.batch_size, config.max_seq) batch_x = torch.from_numpy(batch_x).contiguous().to(config.device, non_blocking=True, dtype=torch.int) batch_y = torch.from_numpy(batch_y).contiguous().to(config.device, non_blocking=True, dtype=torch.int) except IndexError: continue start_time = time.time() mt.train() sample = mt.forward(batch_x) metrics = metric_set(sample, batch_y) loss = metrics['loss'] / config.accum_grad loss.backward() if (b+1) % config.accum_grad == 0: scheduler.step() # train_summary_writer.add_scalar('loss', metrics['loss'], global_step=idx) # train_summary_writer.add_scalar('accuracy', metrics['accuracy'], global_step=idx) # train_summary_writer.add_scalar('learning_rate', scheduler.rate(), global_step=idx) # train_summary_writer.add_scalar('iter_p_sec', end_time-start_time, global_step=idx) scheduler.optimizer.zero_grad() end_time = time.time() # if config.debug: # print("[Loss]: {}".format(loss)) torch.cuda.empty_cache() idx += 1 # switch output device to: gpu-1 ~ gpu-n sw_start = time.time() if torch.cuda.device_count() > 1: mt.output_device = idx % (torch.cuda.device_count() -1) + 1 sw_end = time.time() # if config.debug: # print('output switch time: {}'.format(sw_end - sw_start) ) # result_metrics = metric_set(sample, batch_y) mt.eval() eval_x, eval_y = dataset.slide_seq2seq_batch(2, 
config.max_seq, 'valid') eval_x = torch.from_numpy(eval_x).contiguous().to(config.device, dtype=torch.int) eval_y = torch.from_numpy(eval_y).contiguous().to(config.device, dtype=torch.int) eval_preiction, weights = mt.forward(eval_x) eval_metrics = metric_set(eval_preiction, eval_y) if (e+1) % 50 == 0: save_model(e, eval_metrics['accuracy']) # if b == 0: # # train_summary_writer.add_histogram("target_analysis", batch_y, global_step=e) # # train_summary_writer.add_histogram("source_analysis", batch_x, global_step=e) # for i, weight in enumerate(weights): # attn_log_name = "attn/layer-{}".format(i) # # utils.attention_image_summary( # # attn_log_name, weight, step=idx, writer=eval_summary_writer) # eval_summary_writer.add_scalar('loss', eval_metrics['loss'], global_step=idx) # eval_summary_writer.add_scalar('accuracy', eval_metrics['accuracy'], global_step=idx) # eval_summary_writer.add_histogram("logits_bucket", eval_metrics['bucket'], global_step=idx) print('\n====================================================') print('Epoch/Batch: {}/{}'.format(e, b)) print('Train >>>> Loss: {:6.6}, Accuracy: {}'.format(metrics['loss'], metrics['accuracy'])) print('Eval >>>> Loss: {:6.6}, Accuracy: {}'.format(eval_metrics['loss'], eval_metrics['accuracy'])) except KeyboardInterrupt: save_model(e) print(Exception) break save_model(epochs) # eval_summary_writer.close() # train_summary_writer.close()
en
0.33211
# import sys # sys.path.append('/data2/qt/MusicGeneration/mg/model/Musictransformer') # from tensorboardX import SummaryWriter # ------------------------------------------------------------------------ #EventSeq.dim() # ======================================================================== # Load model and dataset # ======================================================================== # check cuda # init metric set # model.load_state_dict(torch.load(load_path)) def load_dataset(limlen): global data_path dataset = Event_Dataset(data_path, limlen, verbose=True) dataset_size = len(dataset.samples) assert dataset_size > 0 return dataset # load data # print(mt) # print('-' * 70) # # print('Loading dataset') # # print(os.path.isdir(data_path)) # dataset = load_dataset(limlen) # print(dataset) # ------------------------------------------------------------------------ #torch.save(single_mt.state_dict(), args.model_dir+'/train-{}-{}.pth'.format(e, eval_metrics['accuracy'])) # torch.save({'model_config': model_config, # 'model_state': model.state_dict(), # 'model_optimizer_state': optimizer.state_dict()}, save_path) # ======================================================================== # Training # ======================================================================== # load model # # define model # mt = MusicTransformer( # embedding_dim=config.embedding_dim, # vocab_size=config.vocab_size, # num_layer=config.num_layers, # max_seq=config.max_seq, # dropout=config.dropout, # debug=config.debug, loader_path=config.load_path # ) # mt.to(config.device) # multi-GPU set # single_mt = mt # if torch.cuda.device_count() > 1 and multi_gpu: # mt = torch.nn.DataParallel(mt, output_device=torch.cuda.device_count()-1) # define tensorboard writer # train_log_dir = 'logs/'+config.experiment+'/'+current_time+'/train' # eval_log_dir = 'logs/'+config.experiment+'/'+current_time+'/eval' # train_summary_writer = SummaryWriter(train_log_dir) # eval_summary_writer = 
SummaryWriter(eval_log_dir) # Train Start # train_summary_writer.add_scalar('loss', metrics['loss'], global_step=idx) # train_summary_writer.add_scalar('accuracy', metrics['accuracy'], global_step=idx) # train_summary_writer.add_scalar('learning_rate', scheduler.rate(), global_step=idx) # train_summary_writer.add_scalar('iter_p_sec', end_time-start_time, global_step=idx) # if config.debug: # print("[Loss]: {}".format(loss)) # switch output device to: gpu-1 ~ gpu-n # if config.debug: # print('output switch time: {}'.format(sw_end - sw_start) ) # result_metrics = metric_set(sample, batch_y) # if b == 0: # # train_summary_writer.add_histogram("target_analysis", batch_y, global_step=e) # # train_summary_writer.add_histogram("source_analysis", batch_x, global_step=e) # for i, weight in enumerate(weights): # attn_log_name = "attn/layer-{}".format(i) # # utils.attention_image_summary( # # attn_log_name, weight, step=idx, writer=eval_summary_writer) # eval_summary_writer.add_scalar('loss', eval_metrics['loss'], global_step=idx) # eval_summary_writer.add_scalar('accuracy', eval_metrics['accuracy'], global_step=idx) # eval_summary_writer.add_histogram("logits_bucket", eval_metrics['bucket'], global_step=idx) # eval_summary_writer.close() # train_summary_writer.close()
2.051392
2
events/views.py
aduuna/edsa-ug
0
6629781
from django.shortcuts import render, get_object_or_404, get_list_or_404 from django.http import Http404, HttpResponse, HttpResponseRedirect from .models import Event from .forms import EventRegisterForm from django.utils import timezone from django.urls import reverse # Create your views here. def index(request): now = timezone.now() upcoming_events = Event.objects.filter(date__gte=now).order_by('date')[:5] recent_events = Event.objects.filter(date__lt=now).order_by('-date')[:5] args = { 'upcoming_events':upcoming_events, 'recent_events':recent_events, } return render(request, 'events/index.html',args) def detail(request, event_id): event = get_object_or_404(Event, pk=event_id) args = {'event':event} return render(request, 'events/detail.html', args) def register(request, event_id): if request.method == 'POST': form = EventRegisterForm(request.POST) if form.is_valid(): event = get_object_or_404(Event, pk=1) data = form.cleaned_data attendee = event.attendee_set.create(name=data['name'],email=data['email']) if event.already_registered(attendee): attendee.delete() return render(request, 'events/registerform.html', {'form':form, 'event_id':event_id, 'error_message':'User Already Registered'}) else: attendee.save() return HttpResponseRedirect(reverse('events:done', args=(event_id,))) else: form = EventRegisterForm() return render(request, 'events/registerform.html', {'form':form, 'event_id':event_id}) def done(request, event_id): return render(request, 'events/donepage.html')
from django.shortcuts import render, get_object_or_404, get_list_or_404 from django.http import Http404, HttpResponse, HttpResponseRedirect from .models import Event from .forms import EventRegisterForm from django.utils import timezone from django.urls import reverse # Create your views here. def index(request): now = timezone.now() upcoming_events = Event.objects.filter(date__gte=now).order_by('date')[:5] recent_events = Event.objects.filter(date__lt=now).order_by('-date')[:5] args = { 'upcoming_events':upcoming_events, 'recent_events':recent_events, } return render(request, 'events/index.html',args) def detail(request, event_id): event = get_object_or_404(Event, pk=event_id) args = {'event':event} return render(request, 'events/detail.html', args) def register(request, event_id): if request.method == 'POST': form = EventRegisterForm(request.POST) if form.is_valid(): event = get_object_or_404(Event, pk=1) data = form.cleaned_data attendee = event.attendee_set.create(name=data['name'],email=data['email']) if event.already_registered(attendee): attendee.delete() return render(request, 'events/registerform.html', {'form':form, 'event_id':event_id, 'error_message':'User Already Registered'}) else: attendee.save() return HttpResponseRedirect(reverse('events:done', args=(event_id,))) else: form = EventRegisterForm() return render(request, 'events/registerform.html', {'form':form, 'event_id':event_id}) def done(request, event_id): return render(request, 'events/donepage.html')
en
0.968116
# Create your views here.
2.093593
2
modules/s3db/req.py
arnavsharma93/eden
0
6629782
# -*- coding: utf-8 -*- """ Sahana Eden Request Model @copyright: 2009-2013 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" __all__ = ["S3RequestModel", "S3RequestItemModel", "S3RequestSkillModel", "S3RequestRecurringModel", "S3RequestSummaryModel", "S3RequestTaskModel", "S3CommitModel", "S3CommitItemModel", "S3CommitPersonModel", "S3CommitSkillModel", "req_item_onaccept", "req_update_status", "req_rheader", "req_match", "req_add_from_template", "req_customize_req_fields", "req_req_list_layout", "req_customize_commit_fields", "req_commit_list_layout", ] from gluon import * from gluon.storage import Storage from ..s3 import * from s3layouts import S3AddResourceLink REQ_STATUS_NONE = 0 REQ_STATUS_PARTIAL = 1 REQ_STATUS_COMPLETE = 2 REQ_STATUS_CANCEL = 3 # ============================================================================= class S3RequestModel(S3Model): """ """ names = ["req_req", "req_req_id", "req_req_ref", "req_hide_quantities", "req_inline_form", "req_create_form_mods", "req_prep", "req_tabs", "req_priority_opts", ] def model(self): T = current.T db = current.db auth = current.auth session = current.session s3 = current.response.s3 settings = current.deployment_settings person_id = self.pr_person_id messages = current.messages NONE = messages["NONE"] UNKNOWN_OPT = messages.UNKNOWN_OPT AUTOCOMPLETE_HELP = messages.AUTOCOMPLETE_HELP s3_string_represent = lambda str: str if str else NONE add_components = self.add_components crud_strings = s3.crud_strings set_method = self.set_method super_link = self.super_link # Multiple Item/Skill Types per Request? 
multiple_req_items = settings.get_req_multiple_req_items() req_status_writable = settings.get_req_status_writable() req_status_opts = {REQ_STATUS_NONE: SPAN(T("None"), _class = "req_status_none"), REQ_STATUS_PARTIAL: SPAN(T("Partial"), _class = "req_status_partial"), REQ_STATUS_COMPLETE: SPAN(T("Complete"), _class = "req_status_complete"), } req_status = S3ReusableField("req_status", "integer", label = T("Request Status"), requires = IS_NULL_OR( IS_IN_SET(req_status_opts, zero = None)), represent = lambda opt: \ req_status_opts.get(opt, UNKNOWN_OPT), default = REQ_STATUS_NONE, writable = req_status_writable, ) req_ref = S3ReusableField("req_ref", "string", label = T("%(REQ)s Number") % # dict(REQ=settings.get_req_shortname()), writable = False, represent = self.req_ref_represent, ) req_priority_opts = {3: T("High"), 2: T("Medium"), 1: T("Low") } req_types_deployed = settings.get_req_req_type() req_type_opts = {} if settings.has_module("inv") and "Stock" in req_types_deployed: # Number hardcoded in controller & JS req_type_opts[1] = settings.get_req_type_inv_label() #if settings.has_module("asset") and "Asset" in req_types_deployed: # req_type_opts[2] = T("Assets") if settings.has_module("hrm") and "People" in req_types_deployed: req_type_opts[3] = settings.get_req_type_hrm_label() #if settings.has_module("cr") and "Shelter" in req_types_deployed: # req_type_opts[4] = T("Shelter") if "Other" in req_types_deployed: req_type_opts[9] = T("Other") use_commit = settings.get_req_use_commit() req_ask_security = settings.get_req_ask_security() req_ask_transport = settings.get_req_ask_transport() date_writable = settings.get_req_date_writable() requester_label = settings.get_req_requester_label() requester_is_author = settings.get_req_requester_is_author() if requester_is_author: site_default = auth.user.site_id if auth.is_logged_in() else None requester_default = auth.s3_logged_in_person() else: site_default = None requester_default = None # Dropdown or Autocomplete? 
if settings.get_org_site_autocomplete(): site_widget = S3SiteAutocompleteWidget() site_comment = S3AddResourceLink(c="org", f="facility", vars = dict(child="site_id"), title=T("Create Facility"), tooltip=AUTOCOMPLETE_HELP) else: site_widget = None site_comment = S3AddResourceLink(c="org", f="facility", vars = dict(child="site_id"), title=T("Create Facility")) # --------------------------------------------------------------------- # Requests tablename = "req_req" self.define_table(tablename, super_link("doc_id", "doc_entity"), # @ToDo: Replace with Link Table self.event_event_id( default=session.s3.event, readable = False, writable = False, ondelete="SET NULL"), Field("type", "integer", requires = IS_IN_SET(req_type_opts, zero=None), represent = lambda opt: \ req_type_opts.get(opt, UNKNOWN_OPT), label = T("Request Type")), req_ref(), s3_datetime(label = T("Date Requested"), default="now", past=8760, # Hours, so 1 year future=0, readable=date_writable, writable=date_writable, #represent="date", #widget="date", ), Field("priority", "integer", default = 2, label = T("Priority"), #@ToDo: Colour code the priority text - red, orange, green represent = lambda opt: \ req_priority_opts.get(opt, UNKNOWN_OPT), #represent = self.req_priority_represent, requires = IS_NULL_OR( IS_IN_SET(req_priority_opts)) ), # This is a component, so needs to be a super_link # - can't override field name, ondelete or requires super_link("site_id", "org_site", comment = site_comment, default = site_default, empty = False, filterby = "obsolete", filter_opts = [False], instance_types = auth.org_site_types, label = T("Requested For Facility"), readable = True, represent = self.org_site_represent, updateable = True, widget = site_widget, writable = True, ), #Field("location", # label = T("Neighborhood")), # Donations: What will the Items be used for?; People: Task Details s3_comments("purpose", comment = "", label = T("Purpose"), # Only-needed for summary mode (unused) #represent = 
self.req_purpose_represent, represent = s3_string_represent, ), Field("is_template", "boolean", label = T("Recurring Request?"), represent = s3_yes_no_represent, default = False, comment = DIV(_class="tooltip", _title="%s|%s" % (T("Recurring Request?"), T("If this is a request template to be added repeatedly then the schedule can be set on the next page."))), ), s3_datetime("date_required", label = T("Date Needed By"), past=1, # Allow time for people to fill out form future=8760, # Hours, so 1 year #represent="date", #widget="date", ), s3_datetime("date_required_until", label = T("Date Required Until"), past=0, future=8760, # Hours, so 1 year readable = False, writable = False ), person_id("requester_id", label = requester_label, empty = settings.get_req_requester_optional(), #writable = False, comment = S3AddResourceLink(c="pr", f="person", vars = dict(child="requester_id", parent="req"), title=crud_strings["pr_person"].label_create, tooltip=AUTOCOMPLETE_HELP), default = requester_default ), person_id("assigned_to_id", # This field should be in req_commit, but that complicates the UI readable = False, writable = False, label = T("Assigned To") ), person_id("approved_by_id", label = T("Approved By"), readable = False, writable = False, ), person_id("request_for_id", label = T("Requested For"), readable = False, writable = False, #default = auth.s3_logged_in_person() ), Field("transport_req", "boolean", represent = s3_yes_no_represent, readable = req_ask_transport, writable = req_ask_transport, label = T("Transportation Required")), Field("security_req", "boolean", represent = s3_yes_no_represent, readable = req_ask_security, writable = req_ask_security, label = T("Security Required")), s3_datetime("date_recv", label = T("Date Received"), # Could be T("Date Delivered") - make deployment_setting past=8760, # Hours, so 1 year future=0, readable = False, writable = False, ), person_id("recv_by_id", label = T("Received By"), # @ToDo: Set this in Update forms? 
Dedicated 'Receive' button? # (Definitely not in Create forms) #default = auth.s3_logged_in_person() ), # Simple Status # - currently just enabled in customize_req_fields() workflow req_status(readable = False, writable = False, ), # Detailed Status req_status("commit_status", readable = use_commit, writable = req_status_writable and use_commit, represent = self.req_commit_status_represent, label = T("Commit. Status")), req_status("transit_status", label = T("Transit Status")), req_status("fulfil_status", label = T("Fulfil. Status")), Field("closed", "boolean", label = T("Closed"), comment = DIV(_class="tooltip", _title="%s|%s" % (T("Closed"), T("No more items may be added to this request"))), default = False), Field("cancel", "boolean", label = T("Cancel"), default = False), Field.Method("details", req_req_details), Field.Method("drivers", req_req_drivers), s3_comments(comment=""), *s3_meta_fields()) # @todo: make lazy_table table = db[tablename] if len(req_type_opts) == 1: k, v = req_type_opts.items()[0] field = table.type field.default = k field.writable = False field.readable = False if not settings.get_req_use_req_number(): table.req_ref.readable = False table.req_ref.writable = False # CRUD strings ADD_REQUEST = T("Make Request") crud_strings[tablename] = Storage( label_create = ADD_REQUEST, title_display = T("Request Details"), title_list = T("Requests"), title_map=T("Map of Requests"), title_report = T("Requests Report"), title_update = T("Edit Request"), label_list_button = T("List Requests"), label_delete_button = T("Delete Request"), msg_record_created = T("Request Added"), msg_record_modified = T("Request Updated"), msg_record_deleted = T("Request Canceled"), msg_list_empty = T("No Requests")) # Which levels of Hierarchy are we using? 
hierarchy = current.gis.get_location_hierarchy() levels = hierarchy.keys() if len(settings.get_gis_countries()) == 1 or \ s3.gis.config.region_location_id: try: levels.remove("L0") except: pass filter_widgets = [ #S3TextFilter(["committer_id$first_name", # "committer_id$middle_name", # "committer_id$last_name", # "site_id$name", # "comments", # "req_id$name", # "organisation_id$name" # ], # label = T("Search") # comment=T("Search for a commitment by Committer name, Request ID, Site or Organization."), # ), S3OptionsFilter("transit_status", label = T("Transit Status"), options = req_status_opts, cols = 3, ), S3OptionsFilter("fulfil_status", label = T("Fulfill Status"), options = req_status_opts, cols = 3, ), S3LocationFilter("site_id$location_id", levels=levels, widget="multiselect", hidden=True, ), S3OptionsFilter("site_id", label=T("Requested For Facility"), widget="multiselect", hidden=True, ), S3OptionsFilter("created_by", label=T("Logged By"), widget="multiselect", hidden=True, ), S3DateFilter("date", label=T("Date"), hide_time=True, input_labels = {"ge": "From", "le": "To"}, comment=T("Search for requests made between these dates."), hidden=True, ), S3DateFilter("date_required", label=T("Date Needed By"), hide_time=True, input_labels = {"ge": "From", "le": "To"}, comment=T("Search for requests required between these dates."), hidden=True, ), ] if "Stock" in req_type_opts: filter_widgets.insert(4, S3OptionsFilter("item_category.name", label = T("Item Category"), widget = "multiselect", hidden=True, )) if len(req_type_opts) > 1: filter_widgets.insert(2, S3OptionsFilter("type", label=T("Type"), cols = len(req_type_opts), hidden=True, )) if use_commit: filter_widgets.insert(2, S3OptionsFilter("commit_status", label = T("Commit Status"), options = req_status_opts, cols = 3, )) report_fields = ["priority", "site_id$organisation_id", #"site_id$location_id$L1", #"site_id$location_id$L2", "site_id$location_id$L3", "site_id$location_id$L4", "site_id", ] # @ToDo: id gets 
stripped in _select_field fact_fields = report_fields + [(T("Requests"), "id")] # Reusable Field represent = self.req_represent req_id = S3ReusableField("req_id", "reference %s" % tablename, requires = IS_NULL_OR( IS_ONE_OF(db, "req_req.id", lambda id, row: represent(id, row, show_link=False), orderby="req_req.date", sort=True) ), represent = represent, sortby = "date", label = T("Request"), ondelete = "CASCADE", ) list_fields = ["id", "date", "date_required", "site_id", "requester_id", #"event_id", # @ToDo: Vary by deployment_setting (easy) # @ToDo: Allow a single column to support different components based on type # @ToDo: Include Qty too (Computed VF in component?) #(T("Items"), "item.item_id"), #(T("Skills"), "skill.skill_id"), ] if settings.get_req_use_req_number(): list_fields.insert(1, "req_ref") #if len(settings.get_req_req_type()) > 1: # list_fields.append("type") list_fields.append((T("Drivers"), "drivers")) list_fields.append("priority") # @ToDo: Deprecate with type-based components (see above) list_fields.append((T("Details"), "details")) if use_commit: list_fields.append("commit_status") list_fields.append("transit_status") list_fields.append("fulfil_status") list_fields.append((T("Committed By"), "commit.site_id")) self.configure(tablename, onaccept = self.req_onaccept, ondelete = self.req_req_ondelete, deduplicate = self.req_req_duplicate, listadd = False, orderby = "req_req.date desc", filter_widgets = filter_widgets, report_options = Storage( rows=report_fields, cols=report_fields, fact=fact_fields, methods=["count", "list", "sum"], defaults=Storage(rows="site_id$location_id$L4", cols="priority", fact="count(id)", totals=True, ) ), list_fields = list_fields, extra_fields = ["req_ref", "type"], context = {"event": "event_id", "location": "site_id$location_id", "organisation": "site_id$organisation_id", "site": "site_id", }, ) # Custom Methods set_method("req", "req", method="check", action=self.req_check) set_method("req", "req", 
method="commit_all", action=self.req_commit_all) set_method("req", "req", method="copy_all", action=self.req_copy_all) # Print Forms set_method("req", "req", method="form", action=self.req_form) # Components add_components(tablename, # Documents req_document="req_id", # Requested Items req_req_item={"joinby": "req_id", "multiple": multiple_req_items, }, # Requested Skills req_req_skill={"joinby": "req_id", "multiple": multiple_req_items, }, # Commitment req_commit="req_id", # Item Categories supply_item_category={"link": "req_req_item_category", "joinby": "req_id", "key": "item_category_id", }, **{# Scheduler Jobs (for recurring requests) S3Task.TASK_TABLENAME: {"name": "job", "joinby": "req_id", "link": "req_job", "key": "scheduler_task_id", "actuate": "replace", }, } ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(req_create_form_mods = self.req_create_form_mods, req_hide_quantities = self.req_hide_quantities, req_inline_form = self.req_inline_form, req_prep = self.req_prep, req_priority_opts = req_priority_opts, req_priority_represent = self.req_priority_represent, req_req_id = req_id, req_req_ref = req_ref, req_status_opts = req_status_opts, req_type_opts = req_type_opts, req_tabs = self.req_tabs, ) # ------------------------------------------------------------------------- def defaults(self): """ Safe defaults for model-global names in case module is disabled """ req_ref = S3ReusableField("req_ref", "string", readable=False, writable=False) return dict(req_req_ref = req_ref ) # ------------------------------------------------------------------------- @staticmethod def req_create_form_mods(): """ Function to be called from REST prep functions - main module & components (sites & events) """ T = current.T db = current.db s3 = current.response.s3 settings = current.deployment_settings # Hide fields which don't make sense in a Create form table = db.req_req table.req_ref.readable = 
False table.commit_status.readable = table.commit_status.writable = False table.transit_status.readable = table.transit_status.writable = False table.fulfil_status.readable = table.fulfil_status.writable = False table.cancel.readable = table.cancel.writable = False table.date_recv.readable = table.date_recv.writable = False table.recv_by_id.readable = table.recv_by_id.writable = False if settings.get_req_requester_from_site(): # Filter the list of Contacts to those for the site table.requester_id.widget = None s3.jquery_ready.append(''' S3OptionsFilter({ 'triggerName':'site_id', 'targetName':'requester_id', 'lookupResource':'staff', 'lookupURL':S3.Ap.concat('/hrm/staff_for_site/'), 'msgNoRecords':'%s', 'optional':true, })''' % T("No contacts yet defined for this site")) #table.site_id.comment = A(T("Set as default Site"), # _id="req_req_site_id_link", # _target="_blank", # _href=URL(c="default", # f="user", # args=["profile"])) req_types = settings.get_req_req_type() if "People" in req_types: # Show the Required Until Field # (gets turned-off by JS for other types) table.date_required_until.writable = True if "type" not in current.request.vars: # Script to inject into Pages which include Request create forms req_helptext = ''' i18n.req_purpose="%s" i18n.req_site_id="%s" i18n.req_request_for_id="%s" i18n.req_recv_by_id="%s" i18n.req_items_purpose="%s" i18n.req_items_site_id="%s" i18n.req_items_recv_by_id="%s" i18n.req_people_purpose="%s" i18n.req_people_site_id="%s" i18n.req_people_recv_by_id="%s" i18n.req_next_msg="%s" i18n.req_other_msg="%s" i18n.req_details_mandatory="%s"''' % (table.purpose.label, table.site_id.label, table.request_for_id.label, table.recv_by_id.label, T("What the Items will be used for"), T("Deliver To"), T("Delivered To"), T("Task Details"), T("Report To"), T("Reported To"), T("Please enter the details on the next screen."), T("Please enter request details here."), T("Details field is required!")) s3.js_global.append(req_helptext) 
s3.scripts.append("/%s/static/scripts/S3/s3.req_create_variable.js" % current.request.application) else: s3.scripts.append("/%s/static/scripts/S3/s3.req_create.js" % current.request.application) return # ------------------------------------------------------------------------- @staticmethod def req_inline_form(type, method): """ Function to be called from REST prep functions - to add req_item & req_skill components as inline forms """ T = current.T s3db = current.s3db table = s3db.req_req s3 = current.response.s3 postprocess = s3.req_req_postprocess if type == 1: # Dropdown not Autocomplete itable = s3db.req_req_item itable.item_id.widget = None jquery_ready = s3.jquery_ready jquery_ready.append(''' S3OptionsFilter({ 'triggerName':'item_id', 'targetName':'item_pack_id', 'lookupPrefix':'supply', 'lookupResource':'item_pack', 'msgNoRecords':i18n.no_packs, 'fncPrep':S3.supply.fncPrepItem, 'fncRepresent':S3.supply.fncRepresentItem })''') # Custom Form settings = current.deployment_settings fields = ["site_id", "requester_id", "date", "priority", "date_required", S3SQLInlineComponent( "req_item", label = T("Items"), fields = ["item_id", "item_pack_id", "quantity", "comments" ] ), "comments", ] if method == "update": if settings.get_req_status_writable(): fields.insert(7, "fulfil_status") if settings.get_req_show_quantity_transit(): fields.insert(7, "transit_status") if settings.get_req_use_commit(): fields.insert(7, "commit_status") fields.insert(7, "date_recv") if settings.get_req_requester_from_site(): # Filter the list of Contacts to those for the site table.requester_id.widget = None jquery_ready.append(''' S3OptionsFilter({ 'triggerName':'site_id', 'targetName':'requester_id', 'lookupResource':'staff', 'lookupURL':S3.Ap.concat('/hrm/staff_for_site/'), 'msgNoRecords':'%s', 'optional':true, })''' % T("No contacts yet defined for this site")) table.site_id.comment = A(T("Set as default Site"), _id="req_req_site_id_link", _target="_blank", _href=URL(c="default", 
f="user", args=["profile"])) if settings.get_req_items_ask_purpose(): fields.insert(6, "purpose") if method != "update": fields.insert(1, "is_template") if settings.get_req_use_req_number() and \ not settings.get_req_generate_req_number(): fields.insert(0, "req_ref") if postprocess: crud_form = S3SQLCustomForm(*fields, postprocess=postprocess) else: crud_form = S3SQLCustomForm(*fields) s3db.configure("req_req", crud_form=crud_form) elif type == 3: # Custom Form stable = s3db.req_req_skill stable.skill_id.label = T("Required Skills (optional)") # Custom Form settings = current.deployment_settings fields = ["site_id", "requester_id", "date", "priority", "date_required", "date_required_until", "purpose", S3SQLInlineComponent( "req_skill", label = T("Skills"), fields = ["quantity", "skill_id", "comments" ] ), "comments", ] if method == "update": if settings.get_req_status_writable(): fields.insert(8, "fulfil_status") if settings.get_req_show_quantity_transit(): fields.insert(8, "transit_status") if settings.get_req_use_commit(): fields.insert(8, "commit_status") fields.insert(8, "date_recv") if settings.get_req_requester_from_site(): # Filter the list of Contacts to those for the site table.requester_id.widget = None s3.jquery_ready.append(''' S3OptionsFilter({ 'triggerName':'site_id', 'targetName':'requester_id', 'lookupResource':'staff', 'lookupURL':S3.Ap.concat('/hrm/staff_for_site/'), 'msgNoRecords':'%s', 'optional':true, })''' % T("No contacts yet defined for this site")) table.site_id.comment = A(T("Set as default Site"), _id="req_req_site_id_link", _target="_blank", _href=URL(c="default", f="user", args=["profile"])) else: fields.insert(1, "is_template") if settings.get_req_use_req_number() and \ not settings.get_req_generate_req_number(): fields.insert(0, "req_ref") if postprocess: crud_form = S3SQLCustomForm(*fields, postprocess=postprocess) else: crud_form = S3SQLCustomForm(*fields) s3db.configure("req_req", crud_form=crud_form) # 
------------------------------------------------------------------------- @staticmethod def req_prep(r): """ Function to be called from REST prep functions - main module & components (sites) """ if not r.component or r.component.name =="req": default_type = current.db.req_req.type.default if default_type: T = current.T req_submit_button = {1:T("Save and add Items"), 3:T("Save and add People")} current.response.s3.crud.submit_button = req_submit_button[default_type] return # ------------------------------------------------------------------------- @staticmethod def req_represent(id, row=None, show_link=True): """ Represent a Request """ if row: table = current.db.req_req elif not id: return current.messages["NONE"] else: id = int(id) if id: db = current.db table = db.req_req row = db(table.id == id).select(table.date, table.req_ref, table.site_id, limitby=(0, 1)).first() try: if row.req_ref: req = row.req_ref else: req = "%s - %s" % (table.site_id.represent(row.site_id, show_link=False), table.date.represent(row.date)) except: return current.messages.UNKNOWN_OPT if show_link: return A(req, _href = URL(c = "req", f = "req", args = [id]), _title = current.T("Go to Request")) else: return req # ------------------------------------------------------------------------- @staticmethod def req_commit_status_represent(opt): """ Represet the Commitment Status of the Request """ if opt == REQ_STATUS_COMPLETE: # Include the Site Name of the Committer if we can # @ToDo: figure out how! 
return SPAN(current.T("Complete"), _class = "req_status_complete") else: return current.s3db.req_status_opts.get(opt, current.messages.UNKNOWN_OPT) # ------------------------------------------------------------------------- @staticmethod def req_ref_represent(value, show_link=True, pdf=False): """ Represent for the Request Reference if show_link is True then it will generate a link to the record if pdf is True then it will generate a link to the PDF """ if value: if show_link: db = current.db table = db.req_req req_row = db(table.req_ref == value).select(table.id, limitby=(0, 1) ).first() if req_row: if pdf: args = [req_row.id, "form"] else: args = [req_row.id] return A(value, _href = URL(c = "req", f = "req", args = args ), ) return B(value) return current.messages["NONE"] # ------------------------------------------------------------------------- @staticmethod def req_form(r, **attr): """ Generate a PDF of a Request Form """ db = current.db table = db.req_req record = db(table.id == r.id).select(limitby=(0, 1)).first() if record.type == 1: pdf_componentname = "req_item" list_fields = ["item_id", "item_pack_id", "quantity", "quantity_commit", "quantity_transit", "quantity_fulfil", ] elif record.type == 3: pdf_componentname = "req_skill" list_fields = ["skill_id", "quantity", "quantity_commit", "quantity_transit", "quantity_fulfil", ] else: # Not Supported - redirect to normal PDF redirect(URL(args=current.request.args[0], extension="pdf")) if current.deployment_settings.get_req_use_req_number(): filename = record.req_ref else: filename = None exporter = S3Exporter().pdf return exporter(r.resource, request=r, method = "list", pdf_title = current.deployment_settings.get_req_form_name(), pdf_filename = filename, list_fields = list_fields, pdf_hide_comments = True, pdf_componentname = pdf_componentname, pdf_header_padding = 12, #pdf_footer = inv_recv_pdf_footer, pdf_table_autogrow = "B", pdf_paper_alignment = "Landscape", **attr ) # 
------------------------------------------------------------------------- @staticmethod def req_copy_all(r, **attr): """ Custom Method to copy an existing Request - creates a req with req_item records """ db = current.db s3db = current.s3db table = s3db.req_req settings = current.deployment_settings now = current.request.now record = r.record req_id = record.id # Make a copy of the request record if settings.get_req_use_req_number(): code = s3db.supply_get_shipping_code(settings.get_req_shortname(), record.site_id, table.req_ref, ) else: code = None if record.date_required and record.date_required < now: date_required = now + datetime.timedelta(days=14) else: date_required = record.date_required new_req_id = table.insert(type = record.type, req_ref = code, date = now, date_required = date_required, priority = record.priority, site_id = record.site_id, purpose = record.purpose, requester_id = record.requester_id, transport_req = record.transport_req, security_req = record.security_req, comments = record.comments ) # Make a copy of each child record if record.type == 1: # Items ritable = s3db.req_req_item items = db(ritable.req_id == req_id).select(ritable.id, ritable.item_entity_id, ritable.item_id, ritable.item_pack_id, ritable.quantity, ritable.pack_value, ritable.currency, ritable.site_id, ritable.comments) if items: insert = ritable.insert for item in items: insert(req_id=new_req_id, item_entity_id = item.item_entity_id, item_id = item.item_id, item_pack_id = item.item_pack_id, quantity = item.quantity, pack_value = item.pack_value, currency = item.currency, site_id = item.site_id, comments = item.comments) elif record.type == 3: # People and skills rstable = s3db.req_req_skill skills = db(rstable.req_id == req_id).select(rstable.id, rstable.skill_id, rstable.quantity, rstable.site_id, rstable.comments) if skills: insert = rstable.insert for skill in skills: insert(req_id = new_req_id, skill_id = skill.skill_id, quantity = skill.quantity, site_id = 
skill.site_id, comments = skill.comments) redirect(URL(f="req", args=[new_req_id, "update"])) # ------------------------------------------------------------------------- @staticmethod def req_commit_all(r, **attr): """ Custom Method to commit to a Request - creates a commit with commit_items for each req_item """ T = current.T db = current.db s3db = current.s3db table = s3db.req_commit record = r.record req_id = record.id # Check if there is an existing Commitment query = (table.req_id == req_id) & \ (table.deleted == False) exists = db(query).select(table.id, limitby=(0, 1)) if exists: # Browse existing commitments redirect(URL(f="req", args=[r.id, "commit"])) type = record.type # Create the commitment cid = table.insert(req_id=req_id, type=type) if type == 1: # Items ritable = s3db.req_req_item items = db(ritable.req_id == req_id).select(ritable.id, ritable.item_pack_id, ritable.quantity, ritable.comments) if items: citable = s3db.req_commit_item insert = citable.insert for item in items: id = item.id quantity = item.quantity insert(commit_id=cid, req_item_id=id, item_pack_id=item.item_pack_id, quantity=quantity, comments=item.comments) # Mark Item in the Request as Committed db(ritable.id == item.id).update(quantity_commit=quantity) # Mark Request as Committed db(s3db.req_req.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) msg = T("You have committed to all items in this Request. 
Please check that all details are correct and update as-required.") elif type == 3: # People rstable = s3db.req_req_skill skills = db(rstable.req_id == req_id).select(rstable.id, rstable.skill_id, rstable.quantity, rstable.comments) if skills: cstable = s3db.req_commit_skill insert = cstable.insert for skill in skills: id = skill.id quantity = skill.quantity insert(commit_id=cid, skill_id=skill.skill_id, quantity=quantity, comments=skill.comments) # Mark Item in the Request as Committed db(rstable.id == skill.id).update(quantity_commit=quantity) # Mark Request as Committed db(s3db.req_req.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) msg = T("You have committed for all people in this Request. Please check that all details are correct and update as-required.") else: # Other # Mark Request as Committed db(s3db.req_req.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) msg = T("You have committed to this Request. Please check that all details are correct and update as-required.") if "send" in r.args: redirect(URL(f="send_commit", args=[cid])) current.session.confirmation = msg redirect(URL(c="req", f="commit", args=[cid])) # ------------------------------------------------------------------------- @staticmethod def req_priority_represent(id): """ """ src = URL(c="static", f="img", args=["priority", "priority_%d.gif" % (id or 4)] ) return DIV(IMG(_src= src)) # ------------------------------------------------------------------------- @staticmethod def req_hide_quantities(table): """ Hide the Update Quantity Status Fields from Request create forms """ if not current.deployment_settings.get_req_item_quantities_writable(): table.quantity_commit.writable = table.quantity_commit.readable = False table.quantity_transit.writable = table.quantity_transit.readable= False table.quantity_fulfil.writable = table.quantity_fulfil.readable = False # ------------------------------------------------------------------------- @staticmethod def req_tabs(r): """ Add a set of 
Tabs for a Site's Request Tasks @ToDo: Roll these up like inv_tabs in inv.py """ settings = current.deployment_settings if settings.get_org_site_inv_req_tabs(): s3_has_permission = current.auth.s3_has_permission if settings.has_module("req") and \ s3_has_permission("read", "req_req", c="req"): T = current.T tabs = [(T("Requests"), "req")] if s3_has_permission("read", "req_req", c=current.request.controller, f="req_match"): tabs.append((T("Match Requests"), "req_match/")) if settings.get_req_use_commit(): tabs.append((T("Commit"), "commit")) return tabs return [] # ------------------------------------------------------------------------- @staticmethod def req_check(r, **attr): """ Check to see if your Inventory can be used to match any open Requests """ T = current.T db = current.db s3db = current.s3db response = current.response s3 = response.s3 NONE = current.messages["NONE"] site_id = r.vars.site_id site_name = s3db.org_site_represent(site_id, show_link=False) output = {} output["title"] = T("Check Request") output["rheader"] = req_rheader(r, check_page=True) stable = s3db.org_site ltable = s3db.gis_location query = (stable.id == site_id ) & \ (stable.location_id == ltable.id) location_r = db(query).select(ltable.lat, ltable.lon, limitby=(0, 1)).first() query = (stable.id == r.record.site_id ) & \ (stable.location_id == ltable.id) req_location_r = db(query).select(ltable.lat, ltable.lon, limitby=(0, 1)).first() try: distance = current.gis.greatCircleDistance(location_r.lat, location_r.lon, req_location_r.lat, req_location_r.lon,) output["rheader"][0].append(TR(TH(T("Distance from %s:") % site_name), TD(T("%.1f km") % distance) )) except: pass output["subtitle"] = T("Request Items") use_commit = current.deployment_settings.get_req_use_commit() # Get req_items & inv_items from this site table = s3db.req_req_item query = (table.req_id == r.id ) & \ (table.deleted == False ) req_items = db(query).select(table.id, table.item_id, table.quantity, table.item_pack_id, 
table.quantity_commit, table.quantity_transit, table.quantity_fulfil) itable = s3db.inv_inv_item query = (itable.site_id == site_id ) & \ (itable.deleted == False ) inv_items_dict = {} inv_items = db(query).select(itable.item_id, itable.quantity, itable.item_pack_id, # VF #itable.pack_quantity, ) for item in inv_items: item_id = item.item_id if item_id in inv_items_dict: inv_items_dict[item_id] += item.quantity * item.pack_quantity() else: inv_items_dict[item_id] = item.quantity * item.pack_quantity() if len(req_items): row = TR(TH(table.item_id.label), TH(table.quantity.label), TH(table.item_pack_id.label), TH(table.quantity_transit.label), TH(table.quantity_fulfil.label), TH(T("Quantity in %s's Warehouse") % site_name), TH(T("Match?")) ) if use_commit: row.insert(3, TH(table.quantity_commit.label)) items = TABLE(THEAD(row), _id = "list", _class = "dataTable display") supply_item_represent = table.item_id.represent item_pack_represent = table.item_pack_id.represent no_match = True for req_item in req_items: # Convert inv item quantity to req item quantity item_id = req_item.item_id if item_id in inv_items_dict: inv_quantity = inv_items_dict[item_id] / req_item.pack_quantity() else: inv_quantity = NONE if inv_quantity != NONE: no_match = False if inv_quantity < req_item.quantity: status = SPAN(T("Partial"), _class = "req_status_partial") else: status = SPAN(T("YES"), _class = "req_status_complete") else: status = SPAN(T("NO"), _class = "req_status_none"), if use_commit: items.append(TR(#A(req_item.id), supply_item_represent(req_item.item_id), req_item.quantity, item_pack_represent(req_item.item_pack_id), # This requires an action btn to get the req_id req_item.quantity_commit, req_item.quantity_transit, req_item.quantity_fulfil, #req_quantity_represent(req_item.quantity_commit, "commit"), #req_quantity_represent(req_item.quantity_fulfil, "fulfil"), #req_quantity_represent(req_item.quantity_transit, "transit"), inv_quantity, status, ) ) else: 
items.append(TR(#A(req_item.id), supply_item_represent(req_item.item_id), req_item.quantity, item_pack_represent(req_item.item_pack_id), # This requires an action btn to get the req_id req_item.quantity_transit, req_item.quantity_fulfil, #req_quantity_represent(req_item.quantity_fulfil, "fulfil"), #req_quantity_represent(req_item.quantity_transit, "transit"), inv_quantity, status, ) ) output["items"] = items #s3.actions = [req_item_inv_item_btn] s3.no_sspag = True # pag won't work if no_match: current.response.warning = \ T("%(site)s has no items exactly matching this request. There may still be other items in stock which can fulfill this request!") % \ dict(site=site_name) else: output["items"] = s3.crud_strings.req_req_item.msg_list_empty response.view = "list.html" s3.no_formats = True return output # ------------------------------------------------------------------------- @staticmethod def req_onaccept(form): """ After DB I/O """ db = current.db s3db = current.s3db request = current.request settings = current.deployment_settings tablename = "req_req" table = s3db.req_req form_vars = form.vars id = form_vars.id if form_vars.get("is_template", None): is_template = True f = "req_template" else: is_template = False f = "req" # If the req_ref is None then set it up if settings.get_req_use_req_number(): record = db(table.id == id).select(table.req_ref, table.site_id, limitby=(0, 1)).first() if not record.req_ref: code = s3db.supply_get_shipping_code(settings.get_req_shortname(), record.site_id, table.req_ref, ) db(table.id == id).update(req_ref = code) req_status = form_vars.get("req_status", None) if req_status is not None: # Translate Simple Status req_status = int(req_status) if req_status == REQ_STATUS_PARTIAL: # read current status record = db(table.id == id).select(table.commit_status, table.fulfil_status, limitby=(0, 1) ).first() data = dict(cancel = False) if record.commit_status != REQ_STATUS_COMPLETE: data["commit_status"] = REQ_STATUS_PARTIAL if 
record.fulfil_status == REQ_STATUS_COMPLETE: data["fulfil_status"] = REQ_STATUS_PARTIAL db(table.id == id).update(**data) elif req_status == REQ_STATUS_COMPLETE: db(table.id == id).update(fulfil_status = REQ_STATUS_COMPLETE, cancel = False, ) elif req_status == REQ_STATUS_CANCEL: db(table.id == id).update(cancel = True) elif req_status == REQ_STATUS_NONE: db(table.id == id).update(commit_status = REQ_STATUS_NONE, fulfil_status = REQ_STATUS_NONE, cancel = False) if settings.get_req_requester_to_site(): requester_id = form_vars.get("requester_id", None) if requester_id: site_id = form_vars.get("site_id", None) # If the requester has no HR record, then create one hrtable = s3db.hrm_human_resource query = (hrtable.person_id == requester_id) exists = db(query).select(hrtable.id, hrtable.organisation_id, hrtable.site_id, hrtable.site_contact, limitby=(0, 1) ).first() if exists: if site_id and not exists.site_id: # Check that the Request site belongs to this Org stable = s3db.org_site site = db(stable.site_id == site_id).select(stable.organisation_id, limitby=(0, 1) ).first() # @ToDo: Think about branches if site and site.organisation_id == exists.organisation_id: # Set the HR record as being for this site exists.update(site_id = site_id) s3db.hrm_human_resource_onaccept(exists) elif site_id: # Lookup the Org for the site stable = s3db.org_site site = db(stable.site_id == site_id).select(stable.organisation_id, limitby=(0, 1) ).first() # Is there already a site_contact for this site? 
ltable = s3db.hrm_human_resource_site query = (ltable.site_id == site_id) & \ (ltable.site_contact == True) already = db(query).select(ltable.id, limitby=(0, 1) ).first() if already: site_contact = False else: site_contact = True hr_id = hrtable.insert(person_id = requester_id, organisation_id = site.organisation_id, site_id = site_id, site_contact = site_contact, ) s3db.hrm_human_resource_onaccept(Storage(id=hr_id)) # Configure the next page to go to based on the request type if is_template: s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "job"]), update_next = URL(c="req", f=f, args=["[id]", "job"])) elif not settings.get_req_inline_forms(): if table.type.default: type = table.type.default elif "type" in form_vars: type = int(form_vars.type) else: type = 1 if type == 1 and settings.has_module("inv"): s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "req_item"]), update_next = URL(c="req", f=f, args=["[id]", "req_item"])) elif type == 2 and settings.has_module("asset"): s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "req_asset"]), update_next = URL(c="req", f=f, args=["[id]", "req_asset"])) elif type == 3 and settings.has_module("hrm"): s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "req_skill"]), update_next = URL(c="req", f=f, args=["[id]", "req_skill"])) elif type == 4 and settings.has_module("cr"): s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "req_shelter"]), update_next = URL(c="req", f=f, args=["[id]", "req_shelter"])) # ------------------------------------------------------------------------- @staticmethod def req_req_ondelete(row): """ Cleanup any scheduled tasks """ db = current.db table = db.scheduler_task query = (table.function_name == "req_add_from_template") & \ (table.args == "[%s]" % row.id) db(query).delete() # ------------------------------------------------------------------------- @staticmethod def req_req_duplicate(job): 
""" This callback will be called when importing records it will look to see if the record being imported is a duplicate. @param job: An S3ImportJob object which includes all the details of the record being imported If the record is a duplicate then it will set the job method to update Rules for finding a duplicate: - If the Request Number exists then it's a duplicate """ if job.tablename == "req_req": table = job.table if "req_ref" in job.data: request_number = job.data.req_ref else: return query = (table.req_ref == request_number) _duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() if _duplicate: job.id = _duplicate.id job.data.id = _duplicate.id job.method = job.METHOD.UPDATE # ============================================================================= class S3RequestItemModel(S3Model): """ """ names = ["req_req_item", "req_item_id", "req_item_represent", "req_req_item_category", ] def model(self): T = current.T db = current.db settings = current.deployment_settings quantities_writable = settings.get_req_item_quantities_writable() use_commit = settings.get_req_use_commit() show_qty_transit = settings.get_req_show_quantity_transit() track_pack_values = settings.get_inv_track_pack_values() define_table = self.define_table req_id = self.req_req_id # ----------------------------------------------------------------- # Request Items # tablename = "req_req_item" define_table(tablename, req_id(empty=False), self.supply_item_entity_id, self.supply_item_id(), self.supply_item_pack_id(), Field("quantity", "double", notnull=True, requires = IS_FLOAT_IN_RANGE(minimum=1), represent=lambda v: \ IS_FLOAT_AMOUNT.represent(v, precision=2)), Field("pack_value", "double", readable=track_pack_values, writable=track_pack_values, label = T("Estimated Value per Pack")), # @ToDo: Move this into a Currency Widget for the pack_value field s3_currency(readable=track_pack_values, writable=track_pack_values), self.org_site_id, Field("quantity_commit", "double", label = 
T("Quantity Committed"), represent = self.req_qnty_commit_represent, default = 0, requires = IS_FLOAT_IN_RANGE(minimum=0, maximum=999999), readable = use_commit, writable = use_commit and quantities_writable), Field("quantity_transit", "double", label = T("Quantity in Transit"), represent = self.req_qnty_transit_represent, default = 0, requires = IS_FLOAT_IN_RANGE(minimum=0, maximum=999999), readable = show_qty_transit, writable = show_qty_transit and quantities_writable), Field("quantity_fulfil", "double", label = T("Quantity Fulfilled"), represent = self.req_qnty_fulfil_represent, default = 0, requires = IS_FLOAT_IN_RANGE(minimum=0, maximum=999999), writable = quantities_writable), Field.Method("pack_quantity", self.supply_item_pack_quantity(tablename=tablename)), s3_comments(), *s3_meta_fields()) # @todo: make lazy_table table = db[tablename] table.site_id.label = T("Requested From") # CRUD strings ADD_REQUEST_ITEM = T("Add Item to Request") current.response.s3.crud_strings[tablename] = Storage( label_create = ADD_REQUEST_ITEM, title_display = T("Request Item Details"), title_list = T("Items in Request"), title_update = T("Edit Item in Request"), label_list_button = T("List Items in Request"), label_delete_button = T("Delete Item from Request"), msg_record_created = T("Item(s) added to Request"), msg_record_modified = T("Item(s) updated on Request"), msg_record_deleted = T("Item(s) deleted from Request"), msg_list_empty = T("No Items currently requested")) # Reusable Field req_item_id = S3ReusableField("req_item_id", "reference %s" % tablename, requires = IS_NULL_OR( IS_ONE_OF(db, "req_req_item.id", self.req_item_represent, orderby="req_req_item.id", sort=True)), represent = self.req_item_represent, label = T("Request Item"), comment = DIV(_class="tooltip", _title="%s|%s" % (T("Request Item"), T("Select Items from the Request"))), ondelete = "CASCADE", script = ''' S3OptionsFilter({ 'triggerName':'req_item_id', 'targetName':'item_pack_id', 
'lookupResource':'item_pack', 'lookupPrefix':'supply', 'lookupURL':S3.Ap.concat('/req/req_item_packs/'), 'msgNoRecords':i18n.no_packs, 'fncPrep':S3.supply.fncPrepItem, 'fncRepresent':S3.supply.fncRepresentItem })''') if settings.get_req_prompt_match(): # Shows the inventory items which match a requested item # @ToDo: Make this page a component of req_item create_next = URL(c="req", f="req_item_inv_item", args=["[id]"]) else: create_next = None list_fields = ["id", "item_id", "item_pack_id", ] if settings.get_req_prompt_match(): list_fields.append("site_id") list_fields.append("quantity") if use_commit: list_fields.append("quantity_commit") if show_qty_transit: list_fields.append("quantity_transit") list_fields.append("quantity_fulfil") list_fields.append("comments") filter_widgets = [ S3OptionsFilter("req_id$fulfil_status", label=T("Status"), options = self.req_status_opts, cols = 3, ), S3OptionsFilter("req_id$priority", label=T("Priority"), options = self.req_priority_opts, cols = 3, ), S3LocationFilter("req_id$site_id$location_id", levels = [#"L1", #"L2", "L3", "L4", ], widget = "multiselect", ), ] self.configure(tablename, super_entity = "supply_item_entity", onaccept = req_item_onaccept, ondelete = req_item_ondelete, create_next = create_next, deletable = settings.get_req_multiple_req_items(), deduplicate = self.req_item_duplicate, list_fields = list_fields, filter_widgets = filter_widgets, extra_fields = ["item_pack_id"], ) # --------------------------------------------------------------------- # # Req <> Item Category link table # # - used to provide a search filter # - populated onaccept/ondelete of req_item # tablename = "req_req_item_category" define_table(tablename, req_id(empty=False), self.supply_item_category_id(), *s3_meta_fields() ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(req_item_id = req_item_id, req_item_represent = self.req_item_represent, ) # 
------------------------------------------------------------------------- def defaults(self): """ Safe defaults for model-global names in case module is disabled """ req_item_id = S3ReusableField("req_item_id", "integer", readable=False, writable=False) return dict(req_item_id = req_item_id ) # ------------------------------------------------------------------------- @staticmethod def req_item_represent(id, row=None): """ Represent a Request Item """ if row: # @ToDo: Optimised query where we don't need to do the join id = row.id elif not id: return current.messages["NONE"] db = current.db ritable = db.req_req_item sitable = db.supply_item query = (ritable.id == id) & \ (ritable.item_id == sitable.id) record = db(query).select(sitable.name, limitby = (0, 1)).first() if record: return record.name else: return None # --------------------------------------------------------------------- @staticmethod def req_qnty_commit_represent(quantity, show_link=True): """ call the generic quantity represent """ return S3RequestItemModel.req_quantity_represent(quantity, "commit", show_link) # --------------------------------------------------------------------- @staticmethod def req_qnty_transit_represent(quantity, show_link=True): """ call the generic quantity represent """ return S3RequestItemModel.req_quantity_represent(quantity, "transit", show_link) # --------------------------------------------------------------------- @staticmethod def req_qnty_fulfil_represent(quantity, show_link=True): """ call the generic quantity represent """ return S3RequestItemModel.req_quantity_represent(quantity, "fulfil", show_link) # --------------------------------------------------------------------- @staticmethod def req_quantity_represent(quantity, type, show_link=True): """ @ToDo: There should be better control of this feature - currently this only works with req_items which are being matched by commit / send / recv """ if quantity and show_link and \ not 
current.deployment_settings.get_req_item_quantities_writable(): return TAG[""](quantity, A(DIV(_class = "quantity %s ajax_more collapsed" % type ), _href = "#", ) ) else: return quantity # ------------------------------------------------------------------------- @staticmethod def req_item_delete(row): """ Update the """ h # ------------------------------------------------------------------------- @staticmethod def req_item_duplicate(job): """ This callback will be called when importing records it will look to see if the record being imported is a duplicate. @param job: An S3ImportJob object which includes all the details of the record being imported If the record is a duplicate then it will set the job method to update Rules for finding a duplicate: - If the Request Number matches - The item is the same """ if job.tablename == "req_req_item": itable = job.table s3db = current.s3db rtable = s3db.req_req stable = s3db.supply_item req_id = None item_id = None for ref in job.references: if ref.entry.tablename == "req_req": if ref.entry.id != None: req_id = ref.entry.id else: uuid = ref.entry.item_id jobitem = job.job.items[uuid] req_id = jobitem.id elif ref.entry.tablename == "supply_item": if ref.entry.id != None: item_id = ref.entry.id else: uuid = ref.entry.item_id jobitem = job.job.items[uuid] item_id = jobitem.id if req_id != None and item_id != None: query = (itable.req_id == req_id) & \ (itable.item_id == item_id) else: return _duplicate = current.db(query).select(itable.id, limitby=(0, 1)).first() if _duplicate: job.id = _duplicate.id job.data.id = _duplicate.id job.method = job.METHOD.UPDATE # ============================================================================= class S3RequestSkillModel(S3Model): """ """ names = ["req_req_skill", "req_skill_represent", ] def model(self): T = current.T settings = current.deployment_settings quantities_writable = settings.get_req_skill_quantities_writable() use_commit = settings.get_req_use_commit() define_table = 
self.define_table # ----------------------------------------------------------------- # Request Skills # tablename = "req_req_skill" define_table(tablename, self.req_req_id(empty=False), # Make this a Component #Field("task", # readable=False, # writable=False, # Populated from req_req 'Purpose' # label = T("Task Details")), self.hrm_multi_skill_id( label = T("Required Skills"), comment = T("Leave blank to request an unskilled person"), represent = lambda id: \ id and S3Represent(lookup="hrm_skill", multiple=True)(id) or \ T("No Skills Required"), ), # @ToDo: Add a minimum competency rating? Field("quantity", "integer", notnull=True, default = 1, requires = IS_INT_IN_RANGE(1, 999999), label = T("Number of People Required"), ), self.org_site_id, Field("quantity_commit", "integer", label = T("Quantity Committed"), default = 0, requires = IS_INT_IN_RANGE(1, 999999), readable = use_commit, writable = use_commit and quantities_writable), Field("quantity_transit", "integer", label = T("Quantity in Transit"), #represent = lambda quantity_transit: \ # req_quantity_represent(quantity_transit, # "transit"), default = 0, requires = IS_INT_IN_RANGE(1, 999999), writable = quantities_writable), Field("quantity_fulfil", "integer", label = T("Quantity Fulfilled"), default = 0, requires = IS_INT_IN_RANGE(1, 999999), writable = quantities_writable), s3_comments( #label = T("Task Details"), #comment = DIV(_class="tooltip", # _title="%s|%s" % (T("Task Details"), # T("Include any special requirements such as equipment which they need to bring."))) ), *s3_meta_fields()) # @todo: make lazy_table table = current.db[tablename] table.site_id.label = T("Requested From") if not settings.get_req_show_quantity_transit(): table.quantity_transit.writable = table.quantity_transit.readable= False # CRUD strings ADD_REQUEST_SKILL = T("Add Skill to Request") current.response.s3.crud_strings[tablename] = Storage( label_create = ADD_REQUEST_SKILL, title_display = T("Requested Skill Details"), 
title_list = T("Requested Skills"), title_update = T("Edit Requested Skill"), label_list_button = T("List Requested Skills"), label_delete_button = T("Remove Skill from Request"), msg_record_created = T("Skill added to Request"), msg_record_modified = T("Requested Skill updated"), msg_record_deleted = T("Skill removed from Request"), msg_list_empty = T("No Skills currently requested")) list_fields = ["id", "skill_id", # @ToDo: Activate based on a deployment_setting #"task", "quantity", "quantity_transit", "quantity_fulfil", "comments", ] if use_commit: list_fields.insert(3, "quantity_commit") # Filter Widgets filter_widgets = [ S3OptionsFilter("req_id$fulfil_status", label=T("Status"), options = self.req_status_opts, cols = 3, ), S3OptionsFilter("req_id$priority", label=T("Priority"), options = self.req_priority_opts, cols = 3, ), S3LocationFilter("req_id$site_id$location_id", levels = [#"L1", #"L2", "L3", "L4", ], widget = "multiselect", ), ] # Configuration self.configure(tablename, onaccept=req_skill_onaccept, # @ToDo: Produce a custom controller like req_item_inv_item? 
#create_next = URL(c="req", f="req_skill_skill", # args=["[id]"]), deletable = settings.get_req_multiple_req_items(), list_fields = list_fields, filter_widgets = filter_widgets, ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(req_skill_represent = self.req_skill_represent, ) # ----------------------------------------------------------------- @staticmethod def req_skill_represent(id): """ Used in controllers/req.py commit() """ if not id: return current.messages["NONE"] db = current.db rstable = db.req_req_skill hstable = db.hrm_skill query = (rstable.id == id) & \ (rstable.skill_id == hstable.id) record = db(query).select(hstable.name, limitby = (0, 1)).first() try: return record.name except: return current.messages.UNKNOWN_OPT # ============================================================================= class S3RequestRecurringModel(S3Model): """ """ names = ["req_job", ] def model(self): T = current.T s3 = current.response.s3 # ----------------------------------------------------------------- # Request Job # # Jobs for Scheduling Recurring Requests # tablename = "req_job" self.define_table(tablename, self.req_req_id(empty=False), s3.scheduler_task_id(), *s3_meta_fields()) # CRUD Strings ADD_JOB = T("Create Job") s3.crud_strings[tablename] = Storage( label_create = ADD_JOB, title_display = T("Request Job"), title_list = T("Request Schedule"), title_update = T("Edit Job"), label_list_button = T("List Jobs"), msg_record_created = T("Job added"), msg_record_modified = T("Job updated"), msg_record_deleted = T("Job deleted"), msg_list_empty = T("No jobs configured yet"), msg_no_match = T("No jobs configured")) # Resource Configuration self.set_method("req", "req", component_name="job", method="reset", action=req_job_reset) # Resource Configuration self.set_method("req", "req", component_name="job", method="run", action=req_job_run) # 
--------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # ------------------------------------------------------------------------- @staticmethod def req_recurring_duplicate(job): """ De-duplicate Recurring Request Jobs """ if job.tablename == "req_recurring": table = job.table name = job.data.get("name", None) query = (table.name == name) _duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() if _duplicate: job.id = _duplicate.id job.data.id = _duplicate.id job.method = job.METHOD.UPDATE # ============================================================================= class S3RequestSummaryModel(S3Model): """ Simple Requests Management System - Organisations can request Money or Time from remote volunteers - Sites can request Time from local volunteers or accept drop-off for Goods """ names = ["req_organisation_needs", "req_site_needs", ] def model(self): T = current.T configure = self.configure crud_strings = current.response.s3.crud_strings define_table = self.define_table # ----------------------------------------------------------------- # Summary of Needs for an Organisation # tablename = "req_organisation_needs" define_table(tablename, self.org_organisation_id( requires = self.org_organisation_requires(required=True), ), Field("money", "boolean", label = T("Soliciting Cash Donations?"), represent = s3_yes_no_represent, default = False, ), Field("money_details", "text", label = T("Details"), widget = s3_richtext_widget, ), Field("vol", "boolean", label = T("Opportunities to Volunteer Remotely?"), represent = s3_yes_no_represent, default = False, ), Field("vol_details", "text", label = T("Details"), widget = s3_richtext_widget, ), *s3_meta_fields()) # CRUD strings ADD_NEEDS = T("Add Organization Needs") crud_strings[tablename] = Storage( title_display=T("Organization Needs"), title_update=T("Edit Organization Needs"), label_delete_button=T("Delete Organization Needs"), 
msg_record_created=T("Organization Needs added"), msg_record_modified=T("Organization Needs updated"), msg_record_deleted=T("Organization Needs deleted")) configure(tablename, context = {"organisation": "organisation_id", }, ) # ----------------------------------------------------------------- # Summary of Needs for a site # tablename = "req_site_needs" define_table(tablename, self.super_link("site_id", "org_site"), Field("vol", "boolean", label = T("Opportunities to Volunteer On-Site?"), represent = s3_yes_no_represent, default = False, ), Field("vol_details", "text", label = T("Details"), widget = s3_richtext_widget, ), Field("goods", "boolean", label = T("Drop-off Location for Goods?"), represent = s3_yes_no_represent, default = False, ), Field("goods_details", "text", label = T("Details"), widget = s3_richtext_widget, ), #s3_comments("needs", # label=T("Needs"), # comment=None, # widget=S3PriorityListWidget(), # ), *s3_meta_fields()) # CRUD strings ADD_NEEDS = T("Add Site Needs") crud_strings[tablename] = Storage( title_display=T("Site Needs"), title_update=T("Edit Site Needs"), label_delete_button=T("Delete Site Needs"), msg_record_created=T("Site Needs added"), msg_record_modified=T("Site Needs updated"), msg_record_deleted=T("Site Needs deleted")) configure(tablename, context = {"location": "site_id$organisation_id", "organisation": "organisation_id", "site": "site_id", }, ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # ============================================================================= class S3RequestTaskModel(S3Model): """ Link Requests for Skills to Tasks """ names = ["req_task", ] def model(self): #T = current.T # ----------------------------------------------------------------- # Link Skill Requests to Tasks # tablename = "req_task_req" self.define_table(tablename, self.project_task_id(), self.req_req_id(empty=False), #self.req_req_person_id(), 
#self.req_req_skill_id(), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # ============================================================================= class S3CommitModel(S3Model): """ """ names = ["req_commit", "req_commit_id", ] def model(self): T = current.T db = current.db auth = current.auth s3 = current.response.s3 add_components = self.add_components settings = current.deployment_settings req_types = settings.get_req_req_type() commit_value = settings.get_req_commit_value() unsolicited_commit = settings.get_req_commit_without_request() committer_is_author = settings.get_req_committer_is_author() if committer_is_author: site_default = auth.user.site_id if auth.is_logged_in() else None committer_default = auth.s3_logged_in_person() else: site_default = None committer_default = None # Dropdown or Autocomplete? if settings.get_org_site_autocomplete(): site_widget = S3SiteAutocompleteWidget() site_comment = DIV(_class="tooltip", _title="%s|%s" % (T("From Facility"), current.messages.AUTOCOMPLETE_HELP)) else: site_widget = None site_comment = None # --------------------------------------------------------------------- # Commitments (Pledges) tablename = "req_commit" self.define_table(tablename, self.super_link("site_id", "org_site", comment = site_comment, default = site_default, label = T("From Facility"), # Non-Item Requests make False in the prep readable = True, writable = True, represent = self.org_site_represent, widget = site_widget, ), self.gis_location_id( # Used for reporting on where Donations originated readable = False, writable = False ), # Non-Item Requests make True in the prep self.org_organisation_id( readable = False, writable = False ), # @ToDo: deployment_setting for whether this can be empty self.req_req_id( empty = not unsolicited_commit, ), Field("type", "integer", # These are copied automatically from the Req readable = False, writable = 
False, ), s3_datetime(default = "now", represent = "date", ), s3_datetime("date_available", label = T("Date Available"), represent = "date", ), self.pr_person_id("committer_id", default = committer_default, label = T("Committed By"), comment = self.pr_person_comment(child="committer_id"), ), # @ToDo: Calculate this from line items in Item Commits Field("value", "double", label = T("Estimated Value"), readable = commit_value, writable = commit_value, ), # @ToDo: Move this into a Currency Widget for the value field s3_currency(readable = commit_value, writable = commit_value, ), Field("cancel", "boolean", default = False, label = T("Cancel"), readable = False, writable = False, ), s3_comments(), *s3_meta_fields()) # Which levels of Hierarchy are we using? hierarchy = current.gis.get_location_hierarchy() levels = hierarchy.keys() if len(settings.get_gis_countries()) == 1 or \ s3.gis.config.region_location_id: levels.remove("L0") filter_widgets = [ S3TextFilter(["committer_id$first_name", "committer_id$middle_name", "committer_id$last_name", "site_id$name", "comments", "req_id$name", "organisation_id$name" ], label = T("Search"), comment=T("Search for a commitment by Committer name, Request ID, Site or Organization."), ), S3LocationFilter("location_id", levels=levels, widget="multiselect", hidden=True, ), S3DateFilter("date", label=T("Date"), hide_time=True, comment=T("Search for commitments made between these dates."), hidden=True, ), S3DateFilter("date_available", label=T("Date Available"), hide_time=True, comment=T("Search for commitments available between these dates."), hidden=True, ), ] if len(req_types) > 1: filter_widgets.insert(1, S3OptionsFilter("type", label=T("Type"), cols = len(req_types), hidden=True, )) # CRUD strings ADD_COMMIT = T("Make Commitment") s3.crud_strings[tablename] = Storage( label_create = ADD_COMMIT, title_display = T("Commitment Details"), title_list = T("Commitments"), title_update = T("Edit Commitment"), label_list_button = T("List 
Commitments"), label_delete_button = T("Delete Commitment"), msg_record_created = T("Commitment Added"), msg_record_modified = T("Commitment Updated"), msg_record_deleted = T("Commitment Canceled"), msg_list_empty = T("No Commitments")) # Reusable Field commit_id = S3ReusableField("commit_id", "reference %s" % tablename, sortby="date", requires = IS_NULL_OR( IS_ONE_OF(db, "req_commit.id", self.commit_represent, orderby="req_commit.date", sort=True)), represent = self.commit_represent, label = T("Commitment"), ondelete = "CASCADE") self.configure(tablename, context = {"event": "req_id$event_id", "location": "location_id", "organisation": "organisation_id", "request": "req_id", #"site": "site_id", "site": "req_id$site_id", }, filter_widgets = filter_widgets, list_fields = ["site_id", "req_id", "committer_id", # @ToDo: Vary by deployment_setting (easy) # @ToDo: Allow a single column to support different components based on type # @ToDo: Include Qty too (Computed VF in component?) (T("Committed Items"), "commit_item.req_item_id$item_id"), #(T("Committed People"), "commit_person.person_id"), #(T("Committed Skills"), "commit_skill.skill_id"), "date", "date_available", "comments", ], # Commitments should only be made to a specific request listadd = unsolicited_commit, onaccept = self.commit_onaccept, ondelete = self.commit_ondelete, onvalidation = self.commit_onvalidation, ) # Components add_components(tablename, # Committed Items req_commit_item="commit_id", # Committed Persons req_commit_person="commit_id", # Committed Skills req_commit_skill="commit_id", ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(req_commit_id = commit_id, ) # ------------------------------------------------------------------------- @staticmethod def commit_represent(id, row=None): """ Represent a Commit """ if row: table = current.db.req_commit elif not id: return current.messages["NONE"] else: db = current.db table 
= db.req_commit row = db(table.id == id).select(table.type, table.date, table.organisation_id, table.site_id, limitby=(0, 1)).first() if row.type == 1: # Items return "%s - %s" % (table.site_id.represent(row.site_id), table.date.represent(row.date)) else: return "%s - %s" % (table.organisation_id.represent(row.organisation_id), table.date.represent(row.date)) # ------------------------------------------------------------------------- @staticmethod def commit_onvalidation(form): """ Copy the request_type to the commitment """ req_id = s3_get_last_record_id("req_req") if req_id: rtable = current.s3db.req_req query = (rtable.id == req_id) req_record = current.db(query).select(rtable.type, limitby=(0, 1)).first() if req_record: form.vars.type = req_record.type # ------------------------------------------------------------------------- @staticmethod def commit_onaccept(form): """ Update Status of Request & components """ db = current.db s3db = current.s3db form_vars = form.vars # @ToDo: Will this always be in vars? 
id = form_vars.id if not id: return ctable = s3db.req_commit site_id = form_vars.get("site_id", None) if site_id: # Set location_id to location of site stable = s3db.org_site site = db(stable.site_id == site_id).select(stable.location_id, limitby=(0, 1)).first() if site and site.location_id: db(ctable.id == id).update(location_id = site.location_id) # Find the request rtable = s3db.req_req query = (ctable.id == id) & \ (rtable.id == ctable.req_id) req = db(query).select(rtable.id, rtable.type, rtable.req_status, rtable.commit_status, limitby=(0, 1)).first() if not req: return req_id = req.id type = req.type if type == 1: # Items # Update Commit Status for Items in the Request # Get the full list of items in the request ritable = s3db.req_req_item query = (ritable.req_id == req_id) & \ (ritable.deleted == False) ritems = db(query).select(ritable.id, ritable.item_pack_id, ritable.quantity, # Virtual Field #ritable.pack_quantity, ) # Get all Commits in-system citable = s3db.req_commit_item query = (ctable.req_id == req_id) & \ (citable.commit_id == ctable.id) & \ (citable.deleted == False) citems = db(query).select(citable.item_pack_id, citable.quantity, # Virtual Field #citable.pack_quantity, ) commit_qty = {} for item in citems: item_pack_id = item.item_pack_id if item_pack_id in commit_qty: commit_qty[item_pack_id] += (item.quantity * item.pack_quantity()) else: commit_qty[item_pack_id] = (item.quantity * item.pack_quantity()) complete = False for item in ritems: if item.item_pack_id in commit_qty: quantity_commit = commit_qty[item.item_pack_id] db(ritable.id == item.id).update(quantity_commit=quantity_commit) req_quantity = item.quantity * item.pack_quantity() if quantity_commit >= req_quantity: complete = True else: complete = False # Update overall Request Status if complete: db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) else: db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL) elif type == 3: # People ## If this is a single 
person commitment, then create the commit_person record automatically #table = s3db.req_commit_person #table.insert(commit_id = id, # #skill_id = ???, # person_id = auth.s3_logged_in_person()) ## @ToDo: Mark Person's allocation status as 'Committed' # Update Commit Status for Skills in the Request # Get the full list of skills in the request # @ToDo: Breakdown to component Skills within multi rstable = s3db.req_req_skill query = (rstable.req_id == req_id) & \ (rstable.deleted == False) rskills = db(query).select(rstable.id, rstable.skill_id, rstable.quantity, ) # Get all Commits in-system cstable = s3db.req_commit_skill query = (ctable.req_id == req_id) & \ (cstable.commit_id == ctable.id) & \ (cstable.deleted == False) cskills = db(query).select(cstable.skill_id, cstable.quantity, ) commit_qty = {} for skill in cskills: multi_skill_id = skill.skill_id for skill_id in multi_skill_id: if skill_id in commit_qty: commit_qty[skill_id] += skill.quantity else: commit_qty[skill_id] = skill.quantity complete = False for skill in rskills: multi_skill_id = skill.skill_id quantity_commit = 0 for skill_id in multi_skill_id: if skill_id in commit_qty: if commit_qty[skill_id] > quantity_commit: quantity_commit = commit_qty[skill_id] db(rstable.id == skill.id).update(quantity_commit=quantity_commit) req_quantity = skill.quantity if quantity_commit >= req_quantity: complete = True else: complete = False # Update overall Request Status if complete: db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) else: db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL) elif type == 9: # Other # Assume Partial not Complete # @ToDo: Provide a way for the committer to specify this data = {} if req.commit_status == REQ_STATUS_NONE: data["commit_status"] = REQ_STATUS_PARTIAL if req.req_status == REQ_STATUS_NONE: # Show as 'Responded' data["req_status"] = REQ_STATUS_PARTIAL if data: db(rtable.id == req_id).update(**data) # 
------------------------------------------------------------------------- @staticmethod def commit_ondelete(row): """ Update Status of Request & components """ db = current.db s3db = current.s3db id = row.id # Find the request ctable = s3db.req_commit fks = db(ctable.id == id).select(ctable.deleted_fk, limitby=(0, 1) ).first().deleted_fk req_id = json.loads(fks)["req_id"] rtable = s3db.req_req req = db(rtable.id == req_id).select(rtable.id, rtable.type, rtable.commit_status, limitby=(0, 1)).first() if not req: return req_id = req.id type = req.type if type == 1: # Items # Update Commit Status for Items in the Request # Get the full list of items in the request ritable = s3db.req_req_item query = (ritable.req_id == req_id) & \ (ritable.deleted == False) ritems = db(query).select(ritable.id, ritable.item_pack_id, ritable.quantity, # Virtual Field #ritable.pack_quantity, ) # Get all Commits in-system # - less those from this commit citable = s3db.req_commit_item query = (ctable.req_id == req_id) & \ (citable.commit_id == ctable.id) & \ (citable.commit_id != id) & \ (citable.deleted == False) citems = db(query).select(citable.item_pack_id, citable.quantity, # Virtual Field #citable.pack_quantity, ) commit_qty = {} for item in citems: item_pack_id = item.item_pack_id if item_pack_id in commit_qty: commit_qty[item_pack_id] += (item.quantity * item.pack_quantity()) else: commit_qty[item_pack_id] = (item.quantity * item.pack_quantity()) complete = False for item in ritems: if item.item_pack_id in commit_qty: quantity_commit = commit_qty[item.item_pack_id] db(ritable.id == item.id).update(quantity_commit=quantity_commit) req_quantity = item.quantity * item.pack_quantity() if quantity_commit >= req_quantity: complete = True else: complete = False # Update overall Request Status if complete: db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) elif not citems: db(rtable.id == req_id).update(commit_status=REQ_STATUS_NONE) else: db(rtable.id == 
req_id).update(commit_status=REQ_STATUS_PARTIAL) elif type == 3: # People ## If this is a single person commitment, then create the commit_person record automatically #table = s3db.req_commit_person #table.insert(commit_id = vars.id, # #skill_id = ???, # person_id = auth.s3_logged_in_person()) ## @ToDo: Mark Person's allocation status as 'Committed' # Update Commit Status for Skills in the Request # Get the full list of skills in the request rstable = s3db.req_req_skill query = (rstable.req_id == req_id) & \ (rstable.deleted == False) rskills = db(query).select(rstable.id, rstable.skill_id, rstable.quantity, ) # Get all Commits in-system # - less those from this commit cstable = s3db.req_commit_skill query = (ctable.req_id == req_id) & \ (cstable.commit_id == ctable.id) & \ (cstable.commit_id != id) & \ (cstable.deleted == False) cskills = db(query).select(cstable.skill_id, cstable.quantity, ) commit_qty = {} for skill in cskills: multi_skill_id = skill.skill_id for skill_id in multi_skill_id: if skill_id in commit_qty: commit_qty[skill_id] += skill.quantity else: commit_qty[skill_id] = skill.quantity complete = False for skill in rskills: multi_skill_id = skill.skill_id quantity_commit = 0 for skill_id in multi_skill_id: if skill_id in commit_qty: if commit_qty[skill_id] > quantity_commit: quantity_commit = commit_qty[skill_id] db(rstable.id == skill.id).update(quantity_commit=quantity_commit) req_quantity = skill.quantity if quantity_commit >= req_quantity: complete = True else: complete = False # Update overall Request Status if complete: db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) elif not cskills: db(rtable.id == req_id).update(commit_status=REQ_STATUS_NONE) else: db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL) elif type == 9: # Other if req.commit_status != REQ_STATUS_NONE: # Assume Complete not partial # @ToDo: Provide a way for the committer to specify this db(rtable.id == req_id).update(commit_status=REQ_STATUS_NONE) 
# =============================================================================
class S3CommitItemModel(S3Model):
    """
        Commitment Items: Supply Items committed against a Request
        (used when deployment_settings enables item-type Requests)
    """

    names = ["req_commit_item",
             "req_send_commit"
             ]

    def model(self):

        T = current.T

        # -----------------------------------------------------------------
        # Commitment Items
        # @ToDo: Update the req_item_id in the commit_item if the req_id of the commit is changed
        tablename = "req_commit_item"
        self.define_table(tablename,
                          self.req_commit_id(),
                          #item_id,
                          #supply_item_id(),
                          self.req_item_id(),
                          self.supply_item_pack_id(),
                          Field("quantity", "double", notnull=True,
                                label = T("Quantity")),
                          # Virtual quantity in base pack units (quantity * pack size)
                          Field.Method("pack_quantity",
                                       self.supply_item_pack_quantity(tablename=tablename)),
                          s3_comments(),
                          *s3_meta_fields())

        # CRUD strings
        ADD_COMMIT_ITEM = T("Add Item to Commitment")
        current.response.s3.crud_strings[tablename] = Storage(
            label_create = ADD_COMMIT_ITEM,
            title_display = T("Commitment Item Details"),
            title_list = T("Commitment Items"),
            title_update = T("Edit Commitment Item"),
            label_list_button = T("List Commitment Items"),
            label_delete_button = T("Delete Commitment Item"),
            msg_record_created = T("Commitment Item added"),
            msg_record_modified = T("Commitment Item updated"),
            msg_record_deleted = T("Commitment Item deleted"),
            msg_list_empty = T("No Commitment Items currently registered"))

        self.configure(tablename,
                       onaccept = self.commit_item_onaccept,
                       # item_pack_id needed by the pack_quantity virtual field
                       extra_fields = ["item_pack_id"])

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict(# Used by commit_req() controller
                    req_commit_item_onaccept = self.commit_item_onaccept,
                    req_send_commit = self.req_send_commit,
                    )

    # -------------------------------------------------------------------------
    @staticmethod
    def commit_item_onaccept(form):
        """
            Update the Commit Status for the Request Item & Request

            @param form: the FORM whose vars contain the req_item_id of
                         the accepted commit item
        """

        db = current.db
        vars = form.vars
        req_item_id = vars.req_item_id

        # Get the req_id
        ritable = db.req_req_item
        req = db(ritable.id == req_item_id).select(ritable.req_id,
                                                   limitby=(0, 1)).first()
        if not req:
            return
        req_id = req.req_id

        # Get the full list of items in the request
        query = (ritable.req_id == req_id) & \
                (ritable.deleted == False)
        ritems = db(query).select(ritable.id,
                                  ritable.item_pack_id,
                                  ritable.quantity,
                                  # Virtual Field
                                  #ritable.pack_quantity,
                                  )

        # Get all Commits in-system
        ctable = db.req_commit
        citable = db.req_commit_item
        query = (ctable.req_id == req_id) & \
                (citable.commit_id == ctable.id) & \
                (citable.deleted == False)
        citems = db(query).select(citable.item_pack_id,
                                  citable.quantity,
                                  # Virtual Field
                                  #citable.pack_quantity,
                                  )
        # Sum committed quantities per pack type (in pack units)
        commit_qty = {}
        for item in citems:
            item_pack_id = item.item_pack_id
            if item_pack_id in commit_qty:
                commit_qty[item_pack_id] += (item.quantity * item.pack_quantity())
            else:
                commit_qty[item_pack_id] = (item.quantity * item.pack_quantity())
        complete = False
        for item in ritems:
            if item.item_pack_id in commit_qty:
                quantity_commit = commit_qty[item.item_pack_id]
                db(ritable.id == item.id).update(quantity_commit=quantity_commit)
                req_quantity = item.quantity * item.pack_quantity()
                # NOTE(review): 'complete' is overwritten on every iteration, so
                # only the LAST matching request item decides the overall status;
                # items with no commits at all never set it False either.
                # Should probably be ANDed across all items — confirm intent.
                if quantity_commit >= req_quantity:
                    complete = True
                else:
                    complete = False

        # Update overall Request Status
        rtable = db.req_req
        if complete:
            db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE)
        else:
            db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL)

    # -------------------------------------------------------------------------
    @staticmethod
    def req_send_commit():
        """
            Create a Shipment (inv_send) containing all items in a
            Commitment, then redirect to it.

            Reads the commit_id from current.request.args[0].
        """

        # Get the commit record
        try:
            commit_id = current.request.args[0]
        except:
            redirect(URL(c="req",
                         f="commit"))

        db = current.db
        s3db = current.s3db
        req_table = db.req_req
        rim_table = db.req_req_item
        com_table = db.req_commit
        cim_table = db.req_commit_item
        send_table = s3db.inv_send
        tracktable = s3db.inv_track_item

        query = (com_table.id == commit_id) & \
                (com_table.req_id == req_table.id) & \
                (com_table.deleted == False)
        # NOTE(review): record is None if commit_id is invalid — the attribute
        # accesses below would then raise; consider guarding
        record = db(query).select(com_table.committer_id,
                                  com_table.site_id,
                                  com_table.organisation_id,
                                  req_table.requester_id,
                                  req_table.site_id,
                                  req_table.req_ref,
                                  limitby=(0, 1)).first()

        # @ToDo: Identify if we have stock items which match the commit items
        # If we have a single match per item then proceed automatically (as-now) & then decrement the stock quantity
        # If we have no match then warn the user & ask if they should proceed anyway
        # If we have mulitple matches then provide a UI to allow the user to select which stock items to use

        # Create an inv_send and link to the commit
        vars = Storage(sender_id = record.req_commit.committer_id,
                       site_id = record.req_commit.site_id,
                       recipient_id = record.req_req.requester_id,
                       to_site_id = record.req_req.site_id,
                       req_ref = record.req_req.req_ref,
                       status = 0)
        send_id = send_table.insert(**vars)
        vars.id = send_id

        # Get all of the committed items
        query = (cim_table.commit_id == commit_id) & \
                (cim_table.req_item_id == rim_table.id) & \
                (cim_table.deleted == False)
        records = db(query).select(rim_table.id,
                                   rim_table.item_id,
                                   rim_table.item_pack_id,
                                   rim_table.currency,
                                   rim_table.quantity,
                                   rim_table.quantity_transit,
                                   rim_table.quantity_fulfil,
                                   cim_table.quantity,
                                   )
        # Create inv_track_items for each commit item
        insert = tracktable.insert
        for row in records:
            rim = row.req_req_item
            # Now done as a VirtualField instead (looks better & updates closer to real-time, so less of a race condition)
            #quantity_shipped = max(rim.quantity_transit, rim.quantity_fulfil)
            #quantity_needed = rim.quantity - quantity_shipped
            id = insert(req_item_id = rim.id,
                        track_org_id = record.req_commit.organisation_id,
                        send_id = send_id,
                        status = 1,
                        item_id = rim.item_id,
                        item_pack_id = rim.item_pack_id,
                        currency = rim.currency,
                        #req_quantity = quantity_needed,
                        quantity = row.req_commit_item.quantity,
                        recv_quantity = row.req_commit_item.quantity,
                        )

        # Create the Waybill
        form = Storage()
        form.vars = vars
        s3db.inv_send_onaccept(form)

        # Redirect to inv_send for the send id just created
        redirect(URL(#c = "inv", or "req"
                     f = "send",
                     #args = [send_id, "track_item"]
                     args = [send_id]
                     ))

# =============================================================================
class S3CommitPersonModel(S3Model):
    """
        Commit a named individual to a Request
    """

    names = ["req_commit_person"]

    def model(self):

        T = current.T

        # -----------------------------------------------------------------
        # Committed Persons
        #
        tablename = "req_commit_person"
        self.define_table(tablename,
                          self.req_commit_id(),
                          # For reference
                          self.hrm_multi_skill_id(writable=False,
                                                  comment=None,
                                                  ),
                          # This should be person not hrm as we want to mark them as allocated
                          self.pr_person_id(),
                          s3_comments(),
                          *s3_meta_fields())

        # CRUD strings
        ADD_COMMIT_PERSON = T("Add Person to Commitment")
        current.response.s3.crud_strings[tablename] = Storage(
            label_create = ADD_COMMIT_PERSON,
            title_display = T("Committed Person Details"),
            title_list = T("Committed People"),
            title_update = T("Edit Committed Person"),
            label_list_button = T("List Committed People"),
            label_delete_button = T("Remove Person from Commitment"),
            msg_record_created = T("Person added to Commitment"),
            msg_record_modified = T("Committed Person updated"),
            msg_record_deleted = T("Person removed from Commitment"),
            msg_list_empty = T("No People currently committed"))

        # @ToDo: Fix this before enabling
        #self.configure(tablename,
        #               onaccept = self.commit_person_onaccept)

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict()

    # -------------------------------------------------------------------------
    @staticmethod
    def commit_person_onaccept(form):
        """
            Not working
            (disabled above — roll up committed quantities onto the
             Request Skill, then refresh the Request's commit status)
        """

        db = current.db
        s3db = current.s3db
        table = db.req_commit_person
        rstable = s3db.req_req_skill

        # Try to get req_skill_id from the form
        req_skill_id = 0
        if form:
            req_skill_id = form.vars.get("req_skill_id", None)
        if not req_skill_id:
            # Fall back to the last accepted commit_skill record
            commit_skill_id = s3_get_last_record_id("req_commit_skill")
            r_commit_skill = table[commit_skill_id]
            req_skill_id = r_commit_skill.req_skill_id

        query = (table.req_skill_id == req_skill_id) & \
                (table.deleted == False)
        commit_skills = db(query).select(table.quantity)
        quantity_commit = 0
        for commit_skill in commit_skills:
            quantity_commit += commit_skill.quantity

        r_req_skill = db.req_req_skill[req_skill_id]
        rstable[req_skill_id] = dict(quantity_commit = quantity_commit)

        # Update status_commit of the req record
        s3_store_last_record_id("req_req_skill", r_req_skill.id)
        req_skill_onaccept(None)

# =============================================================================
class S3CommitSkillModel(S3Model):
    """
        Commit anonymous people to a Request
    """

    names = ["req_commit_skill"]

    def model(self):

        T = current.T

        # -----------------------------------------------------------------
        # Committed Skills
        #
        tablename = "req_commit_skill"
        self.define_table(tablename,
                          self.req_commit_id(),
                          self.hrm_multi_skill_id(),
                          Field("quantity", "double", notnull=True,
                                label = T("Quantity")),
                          s3_comments(),
                          *s3_meta_fields())

        # CRUD strings
        current.response.s3.crud_strings[tablename] = Storage(
            label_create = T("Add People to Commitment"),
            title_display = T("Committed People Details"),
            title_list = T("Committed People"),
            title_update = T("Edit Committed People"),
            label_list_button = T("List Committed People"),
            label_delete_button = T("Remove People from Commitment"),
            msg_record_created = T("People added to Commitment"),
            msg_record_modified = T("Committed People updated"),
            msg_record_deleted = T("People removed from Commitment"),
            msg_list_empty = T("No People currently committed"))

        self.configure(tablename,
                       onaccept = self.commit_skill_onaccept)

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict()

    # -------------------------------------------------------------------------
    @staticmethod
    def commit_skill_onaccept(form):
        """
            Update the Commit Status for the Request Skill & Request

            @param form: the FORM whose vars contain the req_skill_id of
                         the accepted commit_skill record
        """

        db = current.db
        vars = form.vars
        req_skill_id = vars.req_skill_id

        # Get the req_id
        rstable = db.req_req_skill
        req = db(rstable.id == req_skill_id).select(rstable.req_id,
                                                    limitby=(0, 1)).first()
        if not req:
            return
        req_id = req.req_id

        # Get the full list of skills in the request
        query = (rstable.req_id == req_id) & \
                (rstable.deleted == False)
        rskills = db(query).select(rstable.id,
                                   rstable.skill_id,
                                   rstable.quantity,
                                   )

        # Get all Commits in-system
        ctable = db.req_commit
        cstable = db.req_commit_skill
        query = (ctable.req_id == req_id) & \
                (cstable.commit_id == ctable.id) & \
                (cstable.deleted == False)
        cskills = db(query).select(cstable.skill_id,
                                   cstable.quantity,
                                   )
        # Sum committed quantities per individual skill
        # (skill_id is a multi-skill list field, so fan out)
        commit_qty = {}
        for skill in cskills:
            multi_skill_id = skill.skill_id
            for skill_id in multi_skill_id:
                if skill_id in commit_qty:
                    commit_qty[skill_id] += skill.quantity
                else:
                    commit_qty[skill_id] = skill.quantity
        complete = False
        for skill in rskills:
            multi_skill_id = skill.skill_id
            # Take the best coverage across the skills in this request row
            quantity_commit = 0
            for skill_id in multi_skill_id:
                if skill_id in commit_qty:
                    if commit_qty[skill_id] > quantity_commit:
                        quantity_commit = commit_qty[skill_id]
            db(rstable.id == skill.id).update(quantity_commit=quantity_commit)
            req_quantity = skill.quantity
            # NOTE(review): 'complete' is overwritten on every iteration, so
            # only the LAST request-skill row decides the overall status —
            # should probably be ANDed across all rows; confirm intent
            if quantity_commit >= req_quantity:
                complete = True
            else:
                complete = False

        # Update overall Request Status
        rtable = db.req_req
        if complete:
            db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE)
        else:
            db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL)

# =============================================================================
def req_item_onaccept(form):
    """
        Update Request Status
        Update req_item_category link table

        @param form: the FORM whose vars may contain req_id / item_id;
                     falls back to the last-accepted req_req record
        @raise HTTP: 500 if no req_id can be determined
    """

    req_id = form.vars.get("req_id", None)
    if not req_id:
        req_id = s3_get_last_record_id("req_req")
    if not req_id:
        raise HTTP(500, "can not get req_id")

    # Update Request Status
    req_update_status(req_id)

    # Update req_item_category link table
    item_id = form.vars.get("item_id", None)
    db = current.db
    citable = db.supply_catalog_item
    cats = db(citable.item_id == item_id).select(citable.item_category_id)
    rictable = db.req_req_item_category
    for cat in cats:
        item_category_id = cat.item_category_id
        query = (rictable.deleted == False) & \
                (rictable.req_id == req_id) & \
                (rictable.item_category_id == item_category_id)
        exists = db(query).select(rictable.id,
                                  limitby=(0, 1))
        if not exists:
            # Link the Request to this Item Category (used for filtering)
            rictable.insert(req_id = req_id,
                            item_category_id = item_category_id)

# =============================================================================
def req_item_ondelete(row):
    """
        Clean up the req_item_category link table when a Request Item
        is deleted: remove category links no longer backed by any
        remaining item in the Request.

        @param row: the deleted req_req_item Row (id + deleted_fk)
    """

    db = current.db
    sitable = db.supply_item
    ritable = db.req_req_item
    # The FKs of a deleted record are preserved as JSON in deleted_fk
    item = db(ritable.id == row.id).select(ritable.deleted_fk,
                                           limitby=(0, 1)).first()
    fks = json.loads(item.deleted_fk)
    req_id = fks["req_id"]
    item_id = fks["item_id"]
    citable = db.supply_catalog_item
    cats = db(citable.item_id == item_id).select(citable.item_category_id)
    for cat in cats:
        item_category_id = cat.item_category_id
        # Check if we have other req_items in the same category
        query = (ritable.deleted == False) & \
                (ritable.req_id == req_id) & \
                (ritable.item_id == sitable.id) & \
                (sitable.item_category_id == item_category_id)
        others = db(query).select(ritable.id,
                                  limitby=(0, 1))
        if not others:
            # Delete req_item_category link table
            rictable = db.req_req_item_category
            query = (rictable.req_id == req_id) & \
                    (rictable.item_category_id == item_category_id)
            db(query).delete()

# =============================================================================
def req_update_status(req_id):
    """
        Update Request Status
        commit_status, transit_status, fulfil_status
        None => quantity = 0 for ALL items
        Partial => some items have quantity > 0
        Complete => quantity_x = quantity(requested) for ALL items

        @param req_id: the req_req record ID to recalculate
    """

    db = current.db
    s3db = current.s3db
    table = s3db.req_req_item

    is_none = dict(commit = True,
                   transit = True,
                   fulfil = True)
    is_complete = dict(commit = True,
                       transit = True,
                       fulfil = True)

    # Must check all items in the req
    query = (table.req_id == req_id) & \
            (table.deleted == False )
    req_items = db(query).select(table.quantity,
                                 table.quantity_commit,
                                 table.quantity_transit,
                                 table.quantity_fulfil)

    for req_item in req_items:
        quantity = req_item.quantity
        for status_type in ["commit", "transit", "fulfil"]:
            if req_item["quantity_%s" % status_type] < quantity:
                is_complete[status_type] = False
            if req_item["quantity_%s" % status_type]:
                is_none[status_type] = False

    status_update = {}
    for status_type in ["commit", "transit", "fulfil"]:
        if is_complete[status_type]:
            status_update["%s_status" % status_type] = REQ_STATUS_COMPLETE
        elif is_none[status_type]:
            status_update["%s_status" % status_type] = REQ_STATUS_NONE
        else:
            status_update["%s_status" % status_type] = REQ_STATUS_PARTIAL

    rtable = s3db.req_req
    db(rtable.id == req_id).update(**status_update)

# =============================================================================
def req_skill_onaccept(form):
    """
        Update req_req. commit_status, transit_status, fulfil_status
        None => quantity = 0 for ALL skills
        Partial => some skills have quantity > 0
        Complete => quantity_x = quantity(requested) for ALL skills

        Create a Task for People to be assigned to

        @param form: the FORM (may be None when called from
                     commit_person_onaccept — req_id then comes from
                     the last-accepted req_req record)
        @raise HTTP: 500 if no req_id can be determined
    """

    if form and form.vars.req_id:
        req_id = form.vars.req_id
    else:
        req_id = s3_get_last_record_id("req_req")
    if not req_id:
        raise HTTP(500, "can not get req_id")

    db = current.db
    s3db = current.s3db

    rtable = s3db.req_req
    query = (rtable.id == req_id)
    record = db(query).select(rtable.purpose,
                              limitby=(0, 1)).first()

    table = s3db.req_req_skill
    query = (table.req_id == req_id)
    #if record:
    #    # Copy the Task description to the Skills component
    #    db(query).update(task=record.purpose)

    is_none = dict(commit = True,
                   transit = True,
                   fulfil = True)
    is_complete = dict(commit = True,
                       transit = True,
                       fulfil = True)

    # Must check all skills in the req
    req_skills = db(query).select(table.quantity,
                                  table.quantity_commit,
                                  table.quantity_transit,
                                  table.quantity_fulfil)

    for req_skill in req_skills:
        quantity = req_skill.quantity
        for status_type in ["commit", "transit", "fulfil"]:
            if req_skill["quantity_%s" % status_type] < quantity:
                is_complete[status_type] = False
            if req_skill["quantity_%s" % status_type]:
                is_none[status_type] = False

    status_update = {}
    for status_type in ["commit", "transit", "fulfil"]:
        if is_complete[status_type]:
            status_update["%s_status" % status_type] = REQ_STATUS_COMPLETE
        elif is_none[status_type]:
            status_update["%s_status" % status_type] = REQ_STATUS_NONE
        else:
            status_update["%s_status" % status_type] = REQ_STATUS_PARTIAL
    query = (rtable.id == req_id)
    db(query).update(**status_update)

    if current.deployment_settings.has_module("project"):
        # Add a Task to which the People can be assigned
        # Get the request record
        otable = s3db.org_site
        query = (rtable.id == req_id) & \
                (otable.id == rtable.site_id)
        record = db(query).select(rtable.req_ref,
                                  rtable.purpose,
                                  rtable.priority,
                                  rtable.requester_id,
                                  rtable.site_id,
                                  otable.location_id,
                                  limitby=(0, 1)).first()
        if not record:
            return
        name = record.req_req.req_ref or "Req: %s" % req_id
        table = s3db.project_task
        task = table.insert(name=name,
                            description=record.req_req.purpose,
                            priority=record.req_req.priority,
                            location_id=record.org_site.location_id,
                            site_id=record.req_req.site_id)

        # Add the Request as a Component to the Task
        table = s3db.table("req_task_req", None)
        if table:
            table.insert(task_id = task,
                         req_id = req_id)

# =============================================================================
def req_req_details(row):
    """
        Show the requested items/skills
        (list-field / virtual-field representation helper)

        @param row: the req_req Row (or a joined Row containing one)
        @return: comma-joined "<qty> <name>" string, NONE-message, or None
    """

    if hasattr(row, "req_req"):
        row = row.req_req
    try:
        id = row.id
        type = row.type
    except AttributeError:
        return None

    if type == 1:
        # Item request
        s3db = current.s3db
        itable = s3db.supply_item
        ltable = s3db.req_req_item
        query = (ltable.deleted != True) & \
                (ltable.req_id == id) & \
                (ltable.item_id == itable.id)
        items = current.db(query).select(itable.name,
                                         ltable.quantity)
        if items:
            items = ["%s %s" % (int(item.req_req_item.quantity),
                                item.supply_item.name)
                     for item in items]
            return ",".join(items)

    elif type == 3:
        # People request
        s3db = current.s3db
        ltable = s3db.req_req_skill
        query = (ltable.deleted != True) & \
                (ltable.req_id == id)
        skills = current.db(query).select(ltable.skill_id,
                                          ltable.quantity)
        if skills:
            represent = S3Represent(lookup="hrm_skill",
                                    multiple=True,
                                    none=current.T("Unskilled")
                                    )
            skills = ["%s %s" % (skill.quantity,
                                 represent(skill.skill_id)) \
                      for skill in skills]
            return ",".join(skills)

    return current.messages["NONE"]

# =============================================================================
def req_req_drivers(row):
    """
        Show the driver(s) details for shipments sent against a Request
        (matched via the req_ref of the inv_send records)

        @param row: the req_req Row (or a joined Row containing one)
        @return: comma-joined driver details, NONE-message, or None
    """

    if hasattr(row, "req_req"):
        row = row.req_req
    try:
        req_ref = row.req_ref
        type = row.type
    except AttributeError:
        return None

    if type == 1:
        s3db = current.s3db
        stable = s3db.inv_send
        query = (stable.deleted != True) & \
                (stable.req_ref == req_ref)
        drivers = current.db(query).select(stable.driver_name,
                                           stable.driver_phone,
                                           stable.vehicle_plate_no)
        if drivers:
            drivers = ["%s %s %s" % (driver.driver_name or "",
                                     driver.driver_phone or "",
                                     driver.vehicle_plate_no or "") \
                       for driver in drivers]
            return ",".join(drivers)

    return current.messages["NONE"]

# =============================================================================
def req_rheader(r, check_page=False):
    """
        Resource Header for Requests

        @param r: the S3Request
        @param check_page: if True, suppress the tab navigation
                           (used by the "check" page)
        @return: the rheader DIV, or None for non-HTML / non-req views
    """

    if r.representation == "html":
        if r.name == "req":
            record = r.record
            if record:
                T = current.T
                s3db = current.s3db
                request = current.request
                s3 = current.response.s3
                settings = current.deployment_settings

                use_commit = settings.get_req_use_commit()
                is_template = record.is_template

                # Build the component tabs appropriate to the request type
                tabs = [(T("Edit Details"), None)]
                type = record.type
                if type == 1 and settings.has_module("inv"):
                    if settings.get_req_multiple_req_items():
                        req_item_tab_label = T("Items")
                    else:
                        req_item_tab_label = T("Item")
                    tabs.append((req_item_tab_label, "req_item"))
                elif type == 3 and settings.has_module("hrm"):
                    tabs.append((T("People"), "req_skill"))
                tabs.append((T("Documents"), "document"))
                if is_template:
                    tabs.append((T("Schedule"), "job"))
                elif use_commit:
                    tabs.append((T("Commitments"), "commit"))

                if not check_page:
                    rheader_tabs = s3_rheader_tabs(r, tabs)
                else:
                    rheader_tabs = DIV()

                # Action buttons in the footer
                site_id = request.vars.site_id
                if site_id and not is_template:
                    site_name = s3db.org_site_represent(site_id, show_link=False)
                    commit_btn = A(T("Send from %s") % site_name,
                                   _href = URL(c = "req",
                                               f = "send_req",
                                               args = [r.id],
                                               vars = dict(site_id = site_id)
                                               ),
                                   _class = "action-btn"
                                   )
                    s3.rfooter = TAG[""](commit_btn)
                elif r.component and \
                     r.component_name == "commit" and \
                     r.component_id:
                    prepare_btn = A(T("Prepare Shipment"),
                                    _href = URL(f = "send_commit",
                                                args = [r.component_id]
                                                ),
                                    _id = "send_commit",
                                    _class = "action-btn"
                                    )
                    s3.rfooter = TAG[""](prepare_btn)

                site_id = record.site_id
                if site_id:
                    db = current.db
                    stable = s3db.org_site
                if settings.get_req_show_quantity_transit() and not is_template:
                    transit_status = s3db.req_status_opts.get(record.transit_status, "")
                    # NOTE(review): deliberate best-effort — any failure in the
                    # shipment lookup falls back to the plain status text
                    try:
                        if site_id and \
                           record.transit_status in [REQ_STATUS_PARTIAL, REQ_STATUS_COMPLETE] and \
                           record.fulfil_status in [None, REQ_STATUS_NONE, REQ_STATUS_PARTIAL]:
                            site_record = db(stable.site_id == site_id).select(stable.uuid,
                                                                               stable.instance_type,
                                                                               limitby=(0, 1)).first()
                            instance_type = site_record.instance_type
                            table = s3db[instance_type]
                            query = (table.uuid == site_record.uuid)
                            id = db(query).select(table.id,
                                                  limitby=(0, 1)).first().id
                            transit_status = SPAN(transit_status,
                                                  A(T("Incoming Shipments"),
                                                    _href = URL(c = instance_type.split("_")[0],
                                                                f = "incoming",
                                                                vars = {"viewing" : "%s.%s" % (instance_type, id)}
                                                                )
                                                    )
                                                  )
                    except:
                        pass
                    transit_status = (TH("%s: " % T("Transit Status")),
                                      transit_status)
                else:
                    transit_status = ("")

                table = r.table

                if settings.get_req_use_req_number() and not is_template:
                    headerTR = TR(TH("%s: " % table.req_ref.label),
                                  TD(table.req_ref.represent(record.req_ref, show_link=True))
                                  )
                else:
                    headerTR = TR(TD(settings.get_req_form_name(),
                                     _colspan=2, _class="pdf_title"),
                                  )
                if site_id:
                    org_id = db(stable.site_id == site_id).select(stable.organisation_id,
                                                                  limitby=(0, 1)
                                                                  ).first().organisation_id
                    logo = s3db.org_organisation_logo(org_id)
                    if logo:
                        headerTR.append(TD(logo, _colspan=2))

                if is_template:
                    commit_status = ("")
                    fulfil_status = ("")
                    row1 = ""
                    row3 = ""
                else:
                    if use_commit:
                        commit_status = (TH("%s: " % table.commit_status.label),
                                         table.commit_status.represent(record.commit_status))
                    else:
                        commit_status = ("")
                    fulfil_status = (TH("%s: " % table.fulfil_status.label),
                                     table.fulfil_status.represent(record.fulfil_status))
                    row1 = TR(TH("%s: " % table.date.label),
                              table.date.represent(record.date),
                              *commit_status
                              )
                    row3 = TR(TH("%s: " % table.date_required.label),
                              table.date_required.represent(record.date_required),
                              *fulfil_status
                              )

                rData = TABLE(headerTR,
                              row1,
                              TR(TH("%s: " % table.site_id.label),
                                 table.site_id.represent(site_id),
                                 *transit_status
                                 ),
                              TR(TH("%s: " % table.requester_id.label),
                                 table.requester_id.represent(record.requester_id),
                                 ),
                              row3,
                              TR(TH("%s: " % table.purpose.label),
                                 record.purpose
                                 ),
                              TR(TH("%s: " % table.comments.label),
                                 TD(record.comments or "", _colspan=3)
                                 ),
                              )

                rheader = DIV(rData,
                              rheader_tabs,
                              )
                return rheader
    return None

# =============================================================================
def req_match():
    """
        Function to be called from controller functions to display all
        requests as a tab for a site.

        Expects a "viewing" request variable of the form
        "<tablename>.<record_id>" identifying the site.
    """

    T = current.T
    s3db = current.s3db
    s3 = current.response.s3
    request = current.request
    settings = current.deployment_settings

    output = dict()

    viewing = request.get_vars.get("viewing", None)
    if not viewing:
        return output
    if "." in viewing:
        tablename, id = viewing.split(".", 1)
    else:
        return output

    table = s3db[tablename]
    site_id = current.db(table.id == id).select(table.site_id,
                                                limitby=(0, 1)
                                                ).first().site_id
    # Per-row action buttons ("[id]" is substituted client-side)
    actions = [dict(url = URL(c = "req",
                              f = "req",
                              args = ["[id]", "check"],
                              vars = {"site_id": site_id}
                              ),
                    _class = "action-btn",
                    label = str(T("Check")),
                    )
               ]
    if settings.get_req_use_commit():
        actions.append(
            dict(url = URL(c = "req",
                           f = "commit_req",
                           args = ["[id]"],
                           vars = {"site_id": site_id}
                           ),
                 _class = "action-btn",
                 label = str(T("Commit")),
                 )
            )
    actions.append(
        dict(url = URL(c = "req",
                       f = "send_req",
                       args = ["[id]"],
                       vars = {"site_id": site_id}
                       ),
             _class = "action-btn dispatch",
             label = str(T("Send")),
             )
        )
    s3.actions = actions

    # Pick the rheader matching the viewing resource
    if tablename == "org_office":
        rheader = s3db.org_rheader
    elif tablename == "org_facility":
        rheader = s3db.org_facility_rheader
    elif tablename == "inv_warehouse":
        rheader = s3db.inv_rheader
    elif tablename == "cr_shelter":
        rheader = s3db.cr_shelter_rheader
    elif tablename == "hms_hospital":
        rheader = s3db.hms_hospital_rheader
    else:
        rheader = None

    # Exclude the site's own requests
    s3.filter = (s3db.req_req.site_id != site_id)
    s3db.configure("req_req", insertable=False)

    # Pre-process
    def prep(r):
        # Plugin OrgRoleManager
        auth = current.auth
        if auth.user is not None and \
           tablename in S3OrgRoleManager.ENTITY_TYPES:

            sr = auth.get_system_roles()
            realms = auth.user.realms or Storage()

            if sr.ADMIN in realms or sr.ORG_ADMIN in realms and \
               (realms[sr.ORG_ADMIN] is None or \
                r.record.pe_id in realms[sr.ORG_ADMIN]):
                r.set_handler("roles", S3OrgRoleManager())
        return True
    s3.prep = prep

    # Post-process
    def postp(r, output):
        if r.representation == "html":
            output["title"] = s3.crud_strings[tablename].title_display
        return output
    s3.postp = postp

    output = current.rest_controller("req", "req", rheader=rheader)
    return output

# =============================================================================
def req_job_reset(r, **attr):
    """
        RESTful method to reset a job status from FAILED to QUEUED,
        for "Reset" action button

        @param r: the S3Request (component "job" carries the job id)
    """

    if r.interactive:
        if r.component and r.component.alias == "job":
            job_id = r.component_id
            if job_id:
                S3Task.reset(job_id)
                current.session.confirmation = current.T("Job reactivated")
        r.component_id = None
        redirect(r.url(method=""))

# =============================================================================
def req_job_run(r, **attr):
    """
        RESTful method to run a job now, for "Run Now" action button

        @param r: the S3Request (r.id is the template req_req id)
    """

    if r.interactive:
        if r.id:
            # NOTE(review): 'async' is a reserved word from Python 3.7 —
            # this only works on the legacy Python 2 runtime; the method
            # would need renaming (e.g. run_async) for Python 3
            current.s3task.async("req_add_from_template",
                                 [r.id], # args
                                 {"user_id":current.auth.user.id} # vars
                                 )
            current.session.confirmation = current.T("Request added")
        r.component_id = None
        redirect(r.url(method=""))

# =============================================================================
def req_add_from_template(req_id):
    """
        Add a Request from a Template

        @param req_id: the id of the template req_req record
        @return: the id of the newly-created req_req record
    """

    fieldnames = ["type",
                  "priority",
                  "site_id",
                  "purpose",
                  "requester_id",
                  "comments",
                  ]
    db = current.db
    s3db = current.s3db
    table = s3db.req_req
    fields = [table[field] for field in fieldnames]

    # Load Template
    template = db(table.id == req_id).select(limitby=(0, 1),
                                             *fields).first()
    data = {"is_template": False}
    try:
        for field in fieldnames:
            data[field] = template[field]
    except:
        # NOTE(review): raising a string is invalid in Python 3 (and was
        # long-deprecated in Python 2) — should raise an Exception instance
        raise "Template not found: %s" % req_id

    settings = current.deployment_settings
    if settings.get_req_use_req_number():
        code = s3db.supply_get_shipping_code(settings.get_req_shortname(),
                                             template.site_id,
                                             table.req_ref,
                                             )
        data["req_ref"] = code

    id = table.insert(**data)

    if template.type == 1:
        # Copy across req_item
        table = s3db.req_req_item
        fieldnames = ["site_id",
                      "item_id",
                      "item_pack_id",
                      "quantity",
                      "pack_value",
                      "currency",
                      "comments",
                      ]
        fields = [table[field] for field in fieldnames]
        items = db(table.req_id == req_id).select(*fields)
        for item in items:
            data = {"req_id": id}
            for field in fieldnames:
                data[field] = item[field]
            table.insert(**data)

    elif template.type == 3:
        # Copy across req_skill
        table = s3db.req_req_skill
        fieldnames = ["site_id",
                      "task",
                      "skill_id",
                      "quantity",
                      "comments",
                      ]
        fields = [table[field] for field in fieldnames]
        skills = db(table.req_id == req_id).select(*fields)
        for skill in skills:
            data = {"req_id": id}
            for field in fieldnames:
                data[field] = skill[field]
            table.insert(**data)

    return id

# =============================================================================
def req_customize_req_fields():
    """
        Customize req_req fields for the Home page & dataList view
        - this assumes Simple Requests (i.e. type 'Other')

        @return: the customized req_req table
    """

    # Truncate purpose field
    from s3.s3utils import s3_trunk8
    s3_trunk8(lines=2)

    T = current.T
    db = current.db
    s3db = current.s3db
    s3 = current.response.s3
    tablename = "req_req"
    table = s3db.req_req

    crud_fields = ["date",
                   #"priority",
                   "site_id",
                   #"is_template",
                   "requester_id",
                   "purpose",
                   ]

    request = current.request
    args = request.args
    if "update.popup" in args or \
       "update" in args:
        # Expose the status field only on update forms
        field = table.req_status
        field.writable = True
        field.requires = IS_IN_SET({REQ_STATUS_NONE: T("Open"),
                                    REQ_STATUS_PARTIAL: T("Responded"),
                                    REQ_STATUS_COMPLETE: T("Resolved"),
                                    REQ_STATUS_CANCEL: T("Cancelled"),
                                    })
        crud_fields.append("req_status")

    crud_form = S3SQLCustomForm(*crud_fields)

    list_fields = crud_fields + ["site_id$location_id",
                                 "site_id$location_id$level",
                                 "site_id$location_id$parent",
                                 "site_id$organisation_id",
                                 "site_id$comments",
                                 ]

    table.type.default = 9 # Other

    field = table.purpose
    field.label = T("Request")
    field.requires = IS_NOT_EMPTY(error_message=T("Please enter details of the Request"))
    field.represent = lambda body: XML(s3_URLise(body))

    field = table.date
    field.label = T("Date")
    # Make mandatory
    requires = field.requires
    field.requires = requires.other

    field = table.site_id
    site_id = request.get_vars.get("~.(site)", None)
    if site_id:
        # Site fixed by the URL: hide the field & look up its contact
        field.default = site_id
        field.readable = field.writable = False
        # Lookup Site Contact
        script = \
'''var fieldname='req_req_requester_id'
var real_input=$('#'+fieldname)
$.when(S3.addPersonWidgetReady(fieldname)).then(
function(status){real_input.data('lookup_contact')(fieldname,%s)},
function(status){s3_debug(status)},
function(status){s3_debug(status)})''' % site_id
        s3.jquery_ready.append(script)
    else:
        # If the Requester is blank, then lookup default Site Contact
        script = \
'''$('#req_req_site_id').change(function(){
var site_id=$(this).val()
if(site_id){
var fieldname='req_req_requester_id'
var real_input=$('#'+fieldname)
if(!real_input.val()&&!$('#req_req_requester_id_full_name').val()){
real_input.data('lookup_contact')(fieldname,site_id)
}}})'''
        s3.jquery_ready.append(script)

        organisation_id = request.get_vars.get("~.(organisation)", None)
        if organisation_id:
            # Restrict to Sites belonging to this Org
            # @ToDo: Handle Branches
            filterby = "organisation_id"
            filter_opts = [organisation_id]
            # No need to use Site Autocomplete in this case
            field.widget = None
        else:
            filterby = None
            filter_opts = []

        field.label = T("Requested for Site")
        #site_represent = s3db.org_SiteRepresent(show_link=False,
        #                                        show_type=False)
        site_represent = S3Represent(lookup="org_site")
        field.represent = site_represent
        field.requires = IS_ONE_OF(db, "org_site.site_id",
                                   site_represent,
                                   filterby = filterby,
                                   filter_opts = filter_opts,
                                   not_filterby = "obsolete",
                                   not_filter_opts = [True],
                                   orderby = "org_site.name",
                                   sort = True,
                                   )
        field.comment = S3AddResourceLink(c="org", f="facility",
                                          vars = dict(child="site_id",
                                                      parent="req"),
                                          title=T("Add New Site"),
                                          )

    db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")

    field = table.requester_id
    field.requires = IS_ADD_PERSON_WIDGET2()
    field.widget = S3AddPersonWidget2(controller="pr")

    # Which levels of Hierarchy are we using?
    hierarchy = current.gis.get_location_hierarchy()
    # NOTE(review): assumes Python 2 where .keys() returns a list —
    # on Python 3 this is a view with no .remove(); would need list(...)
    levels = hierarchy.keys()
    if len(current.deployment_settings.get_gis_countries()) == 1 or \
       s3.gis.config.region_location_id:
        levels.remove("L0")

    filter_widgets = [
        S3TextFilter(["requester_id$first_name",
                      "requester_id$middle_name",
                      "requester_id$last_name",
                      "site_id$name",
                      "purpose",
                      #"comments",
                      ],
                     label = T("Search"),
                     comment=T("Search for a request by Site name, Requester name or free text."),
                     ),
        #S3OptionsFilter("transit_status",
        #                label = T("Transit Status"),
        #                options = s3db.req_status_opts,
        #                cols = 3,
        #                ),
        #S3OptionsFilter("fulfil_status",
        #                label = T("Fulfill Status"),
        #                options = s3db.req_status_opts,
        #                cols = 3,
        #                ),
        S3LocationFilter("site_id$location_id",
                         levels=levels,
                         widget="multiselect",
                         #hidden=True,
                         ),
        S3OptionsFilter("site_id",
                        label=T("Requested For Site"),
                        widget="multiselect",
                        hidden=True,
                        ),
        S3DateFilter("date",
                     label=T("Date"),
                     hide_time=True,
                     input_labels = {"ge": "From", "le": "To"},
                     comment=T("Search for requests made between these dates."),
                     hidden=True,
                     ),
        #S3DateFilter("date_required",
        #             label=T("Date Needed By"),
        #             hide_time=True,
        #             input_labels = {"ge": "From", "le": "To"},
        #             comment=T("Search for requests required between these dates."),
        #             hidden=True,
        #             ),
        ]

    # @ToDo: deployment_setting
    if current.auth.s3_has_role("EDITOR"):
        filter_widgets.insert(-1, S3OptionsFilter("created_by",
                                                  label=T("Logged By"),
                                                  widget="multiselect",
                                                  hidden=True,
                                                  ))

    # Return to Requests view after create/update/delete (unless done via Modal)
    url_next = URL(c="req", f="req", args="datalist")

    s3db.configure(tablename,
                   create_next = url_next,
                   crud_form = crud_form,
                   delete_next = url_next,
                   filter_formstyle = filter_formstyle,
                   filter_widgets = filter_widgets,
                   # We want the Create form to be in a modal, not inline, for consistency
                   listadd = False,
                   list_fields = list_fields,
                   list_layout = req_req_list_layout,
                   update_next = url_next,
                   )

    return table

#
# =============================================================================
def req_req_list_layout(list_id, item_id, resource, rfields, record):
    """
        Default dataList item renderer for Requests on the
        Home page & dataList view

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict

        @return: a DIV (web2py HTML helper) representing one dataList card
    """

    record_id = record["req_req.id"]
    item_class = "thumbnail"

    raw = record._row
    date = record["req_req.date"]
    body = record["req_req.purpose"]

    # Location link: prefer the site's own location when it has a level,
    # otherwise fall back to the parent location
    location = record["org_site.location_id"] or ""
    level = raw["gis_location.level"]
    if level:
        location_id = raw["org_site.location_id"]
    else:
        location_id = raw["gis_location.parent"]
    if location_id:
        location_url = URL(c="gis", f="location",
                           args=[location_id, "profile"])
    else:
        location_url = "#"

    organisation = record["org_site.organisation_id"] or ""
    organisation_id = raw["org_site.organisation_id"]
    org_url = URL(c="org", f="organisation",
                  args=[organisation_id, "profile"])

    person = record["req_req.requester_id"]
    person_id = raw["req_req.requester_id"]
    person_url = URL(c="pr", f="person", args=[person_id])
    person = A(person,
               _href=person_url,
               )

    # Avatar
    # Try Organisation Logo first, fall back to the Requester's personal avatar
    db = current.db
    otable = db.org_organisation
    row = db(otable.id == organisation_id).select(otable.logo,
                                                  limitby=(0, 1)
                                                  ).first()
    if row and row.logo:
        logo = URL(c="default", f="download", args=[row.logo])
        avatar = IMG(_src=logo,
                     _height=50,
                     _width=50,
                     _style="padding-right:5px;",
                     _class="media-object")
        avatar = A(avatar,
                   _href=org_url,
                   _class="pull-left",
                   )
    else:
        # Personal Avatar
        avatar = s3_avatar_represent(person_id,
                                     tablename="pr_person",
                                     _class="media-object")
        avatar = A(avatar,
                   _href=person_url,
                   _class="pull-left",
                   )

    # Edit Bar (shown only if the user has the relevant permission)
    T = current.T
    auth = current.auth
    permit = auth.s3_has_permission
    table = db.req_req
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="req", f="req",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=T("Edit Request"),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )

    # Card title: link to the Facility's profile when one exists,
    # with the site comments shown as a popover
    s3db = current.s3db
    site = record["req_req.site_id"]
    site_id = raw["req_req.site_id"]
    table = s3db.org_facility
    facility = db(table.site_id == site_id).select(table.id,
                                                   limitby=(0, 1)
                                                   ).first()
    if facility:
        site_url = URL(c="org", f="facility",
                       args=[facility.id, "profile"])
        opts = dict(_href=site_url)
        site_comments = raw["org_site.comments"] or ""
        if site_comments:
            opts["_class"] = "s3-popover"
            opts["_data-toggle"] = "popover"
            opts["_data-content"] = site_comments
        site_link = A(site, **opts)
        card_title = TAG[""](I(_class="icon icon-request"),
                             SPAN(site_link, _class="card-title"))
    else:
        card_title = TAG[""](I(_class="icon icon-request"),
                             SPAN(" ", _class="card-title"))

    #if priority == 3:
    #    # Apply additional highlighting for High Priority
    #    item_class = "%s disaster" % item_class

    # Tallies
    # NB We assume that all records are readable here
    table = s3db.req_commit
    query = (table.deleted == False) & \
            (table.req_id == record_id)
    tally_commits = db(query).count()

    # DONATE button: modal for logged-in users, login redirect otherwise
    #if permit("create", table):
    if auth.is_logged_in():
        btn_class = "s3_modal btn"
        commit_url = URL(c="req", f="commit",
                         args=["create.popup"],
                         vars={"req_id": record_id,
                               "refresh": list_id,
                               "record": record_id,
                               },
                         )
    else:
        btn_class = "btn"
        # NOTE: renamed from 'next' to avoid shadowing the builtin
        login_next = "/%s/req/commit/create?req_id=%s" % \
                     (current.request.application, record_id)
        commit_url = URL(c="default", f="user",
                         args="login",
                         vars={"_next": login_next,
                               },
                         )

    commit_btn = A(I(" ", _class="icon icon-truck"),
                   " ",
                   T("DONATE"),
                   _href=commit_url,
                   _class=btn_class,
                   _title=T("Donate to this Request"),
                   )

    # Render the item
    item = DIV(DIV(card_title,
                   SPAN(A(location,
                          _href=location_url,
                          ),
                        _class="location-title",
                        ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(avatar,
                   DIV(DIV(SPAN(body, _class="s3-truncate"),
                           DIV(person,
                               " - ",
                               A(organisation,
                                 _href=org_url,
                                 _class="card-organisation",
                                 ),
                               _class="card-person",
                               ),
                           _class="media pull-left",
                           ),
                       DIV(P(A(T("Donations"),
                               _href=URL(c="req", f="req",
                                         args=[record_id, "profile"],
                                         ),
                               ),
                             SPAN(tally_commits,
                                  _class="badge",
                                  ),
                             _class="tally",
                             ),
                           commit_btn,
                           _class="media pull-right",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )

    return item

# =============================================================================
def req_customize_commit_fields():
    """
        Customize req_commit fields for the Home page & dataList view

        Configures CRUD strings, list fields, the CRUD form and the
        filter widgets for the req_commit table, varying by the current
        user's role (EDITOR) and organisation membership.

        @return: the (customized) req_commit table
    """

    # Truncate comments field
    from s3.s3utils import s3_trunk8
    s3_trunk8(lines=2)

    T = current.T
    s3db = current.s3db
    s3 = current.response.s3
    settings = current.deployment_settings
    tablename = "req_commit"
    table = s3db.req_commit

    list_fields = [#"req_id", # populated automatically or not at all?
                   "organisation_id",
                   "committer_id",
                   "comments",
                   "date_available",
                   # We'd like to be able to map donations, but harder for users to enter data
                   #"location_id",
                   ]

    if settings.get_req_commit_value():
        list_fields += ["value",
                        "currency",
                        ]

    request = current.request
    args = request.args
    if "create.popup" in args or \
       "create" in args:
        # Creating: default the parent request (or refuse if unmatched
        # donations are disallowed by deployment settings)
        req_id = request.get_vars.get("req_id", None)
        if req_id:
            table.req_id.default = req_id
        elif not settings.get_req_commit_without_request():
            current.session.error = T("Not allowed to Donate without matching to a Request!")
            redirect(URL(c="req", f="req", args=["datalist"]))
    elif "update.popup" in args or \
         "update" in args:
        # Updating: allow cancellation
        list_fields.append("cancel")

    # CRUD strings
    #ADD_COMMIT = T("Make Donation")
    ADD_COMMIT = T("Add Donation")
    s3.crud_strings[tablename] = Storage(
        label_create = ADD_COMMIT,
        title_display = T("Donation Details"),
        title_list = T("Donations"),
        title_update = T("Edit Donation"),
        label_list_button = T("List Donations"),
        label_delete_button = T("Delete Donation"),
        msg_record_created = T("Donation Added"),
        msg_record_modified = T("Donation Updated"),
        msg_record_deleted = T("Donation Canceled"),
        msg_list_empty = T("No Donations"))

    auth = current.auth
    # @ToDo: deployment_setting
    editor = auth.s3_has_role("EDITOR")

    field = table.committer_id
    if editor:
        # Editors may record donations on behalf of any person
        field.requires = IS_ADD_PERSON_WIDGET2()
        field.widget = S3AddPersonWidget2(controller="pr")
        field.default = None
    else:
        field.writable = False

    #field = table.location_id
    #field.represent = s3db.gis_LocationRepresent(sep=" | ")
    # Required
    #field.requires = IS_LOCATION_SELECTOR2()

    field = table.comments
    field.label = T("Donation")
    field.represent = lambda body: XML(s3_URLise(body))
    field.required = True
    # @ToDo
    field.comment = None

    table.date_available.default = current.request.utcnow

    field = table.organisation_id
    field.readable = True
    # FIX: the add-link must target the 'organisation' controller function,
    # not the 'organisation_id' field name (which is not a valid function)
    field.comment = S3AddResourceLink(c="org", f="organisation",
                                      title=T("Create Organization"),
                                      )
    if settings.get_org_autocomplete():
        # Enable if there are many Orgs
        field.widget = S3OrganisationAutocompleteWidget()
    if editor:
        # Editor can select Org
        field.writable = True
        crud_form = S3SQLCustomForm(*list_fields)
    elif auth.user and auth.user.organisation_id:
        field.default = auth.user.organisation_id
        field.writable = False
        crud_form = S3SQLCustomForm(*list_fields)
    else:
        # Only a User representing an Org can commit for an Org
        field.default = None
        field.writable = False
        crud_fields = [f for f in list_fields if f != "organisation_id"]
        crud_form = S3SQLCustomForm(*crud_fields)

    filter_widgets = [
        S3TextFilter(["committer_id$first_name",
                      "committer_id$middle_name",
                      "committer_id$last_name",
                      "site_id$name",
                      "comments",
                      "req_id$name",
                      "organisation_id$name"
                      ],
                     label = T("Search"),
                     comment=T("Search for a commitment by Committer name, Request ID, Site or Organization."),
                     ),
        S3LocationFilter("location_id",
                         widget="multiselect",
                         hidden=True,
                         ),
        #S3DateFilter("date",
        #             label=T("Date"),
        #             hide_time=True,
        #             input_labels = {"ge": "From", "le": "To"},
        #             comment=T("Search for commitments made between these dates."),
        #             hidden=True,
        #             ),
        S3DateFilter("date_available",
                     label=T("Date Available"),
                     hide_time=True,
                     input_labels = {"ge": "From", "le": "To"},
                     comment=T("Search for commitments available between these dates."),
                     hidden=True,
                     ),
        ]

    # Return to Requests view after create/update/delete (unless done via Modal)
    url_next = URL(c="req", f="req", args="datalist")

    s3db.configure(tablename,
                   create_next = url_next,
                   crud_form = crud_form,
                   delete_next = url_next,
                   filter_formstyle = filter_formstyle,
                   filter_widgets = filter_widgets,
                   # We want the Create form to be in a modal, not inline, for consistency
                   listadd = False,
                   list_fields = list_fields,
                   list_layout = req_commit_list_layout,
                   update_next = url_next,
                   )

    return table

# =============================================================================
def req_commit_list_layout(list_id, item_id, resource, rfields, record):
    """
        Default dataList item renderer for Commits on the
        Home page & dataList view

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict

        @return: a DIV (web2py HTML helper) representing one dataList card
    """

    record_id = record["req_commit.id"]
    item_class = "thumbnail"

    raw = record._row
    date = record["req_commit.date_available"]
    body = record["req_commit.comments"]
    title = ""

    #location = record["req_commit.location_id"]
    #location_id = raw["req_commit.location_id"]
    #location_url = URL(c="gis", f="location", args=[location_id, "profile"])

    person = record["req_commit.committer_id"]
    person_id = raw["req_commit.committer_id"]
    person_url = URL(c="pr", f="person", args=[person_id])
    person = A(person,
               _href=person_url,
               )

    organisation_id = raw["req_commit.organisation_id"]
    if organisation_id:
        organisation = record["req_commit.organisation_id"]
        org_url = URL(c="org", f="organisation",
                      args=[organisation_id, "profile"])
        organisation = A(organisation,
                         _href=org_url,
                         _class="card-organisation",
                         )
        organisation = TAG[""](" - ", organisation)
        # Use Organisation Logo
        # @ToDo: option for Personal Avatar (fallback if no Org Logo?)
        db = current.db
        otable = db.org_organisation
        row = db(otable.id == organisation_id).select(otable.logo,
                                                      limitby=(0, 1)
                                                      ).first()
        if row and row.logo:
            logo = URL(c="default", f="download", args=[row.logo])
        else:
            logo = URL(c="static", f="img", args="blank-user.gif")
        avatar = IMG(_src=logo,
                     _height=50,
                     _width=50,
                     _style="padding-right:5px;",
                     _class="media-object")
        avatar = A(avatar,
                   _href=org_url,
                   _class="pull-left",
                   )
    else:
        organisation = ""
        # Personal Avatar
        avatar = s3_avatar_represent(person_id,
                                     tablename="pr_person",
                                     _class="media-object")
        avatar = A(avatar,
                   _href=person_url,
                   _class="pull-left",
                   )

    # Edit Bar (shown only if the user has the relevant permission)
    permit = current.auth.s3_has_permission
    table = current.s3db.req_commit
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="req", f="commit",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.T("Edit Donation"),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )

    card_label = TAG[""](I(_class="icon icon-offer"),
                         SPAN(" %s" % title, _class="card-title"))

    # Render the item
    item = DIV(DIV(card_label,
                   #SPAN(A(location,
                   #       _href=location_url,
                   #       ),
                   #     _class="location-title",
                   #     ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(avatar,
                   DIV(DIV(SPAN(body, _class="s3-truncate"),
                           DIV(person,
                               organisation,
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )

    return item

# -----------------------------------------------------------------------------
def filter_formstyle(row_id, label, widget, comment, hidden=False):
    """
        Custom Formstyle for FilterForm

        @param row_id: HTML id for the row
        @param label: the label (a LABEL helper, or empty)
        @param widget: the form widget
        @param comment: the comment
        @param hidden: whether the row should initially be hidden or not

        @return: a DIV wrapping the label, widget and tooltip
    """

    if hidden:
        _class = "advanced hide"
    else:
        _class = ""
    if not label:
        label = ""
    if comment:
        # Float the tooltip according to text direction
        # (renamed from 'dir' to avoid shadowing the builtin)
        if current.response.s3.rtl:
            float_dir = "fleft"
        else:
            float_dir = "fright"
        # FIX: guard against an empty label — label[0] would raise
        # IndexError when the row has a comment but no label.
        # label[0][:-1] strips the trailing ":" from the LABEL text.
        if label:
            tooltip_label = label[0][:-1]
        else:
            tooltip_label = ""
        comment = DIV(_class = "tooltip %s" % float_dir,
                      _title = "%s|%s" % (tooltip_label, comment),
                      )
    else:
        comment = ""

    return DIV(label,
               widget,
               comment,
               _id=row_id,
               _class=_class,
               )

# END =========================================================================
# -*- coding: utf-8 -*- """ Sahana Eden Request Model @copyright: 2009-2013 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" __all__ = ["S3RequestModel", "S3RequestItemModel", "S3RequestSkillModel", "S3RequestRecurringModel", "S3RequestSummaryModel", "S3RequestTaskModel", "S3CommitModel", "S3CommitItemModel", "S3CommitPersonModel", "S3CommitSkillModel", "req_item_onaccept", "req_update_status", "req_rheader", "req_match", "req_add_from_template", "req_customize_req_fields", "req_req_list_layout", "req_customize_commit_fields", "req_commit_list_layout", ] from gluon import * from gluon.storage import Storage from ..s3 import * from s3layouts import S3AddResourceLink REQ_STATUS_NONE = 0 REQ_STATUS_PARTIAL = 1 REQ_STATUS_COMPLETE = 2 REQ_STATUS_CANCEL = 3 # ============================================================================= class S3RequestModel(S3Model): """ """ names = ["req_req", "req_req_id", "req_req_ref", "req_hide_quantities", "req_inline_form", "req_create_form_mods", "req_prep", "req_tabs", "req_priority_opts", ] def model(self): T = current.T db = current.db auth = current.auth session = current.session s3 = current.response.s3 settings = current.deployment_settings person_id = self.pr_person_id messages = current.messages NONE = messages["NONE"] UNKNOWN_OPT = messages.UNKNOWN_OPT AUTOCOMPLETE_HELP = messages.AUTOCOMPLETE_HELP s3_string_represent = lambda str: str if str else NONE add_components = self.add_components crud_strings = s3.crud_strings set_method = self.set_method super_link = self.super_link # Multiple Item/Skill Types per Request? 
multiple_req_items = settings.get_req_multiple_req_items() req_status_writable = settings.get_req_status_writable() req_status_opts = {REQ_STATUS_NONE: SPAN(T("None"), _class = "req_status_none"), REQ_STATUS_PARTIAL: SPAN(T("Partial"), _class = "req_status_partial"), REQ_STATUS_COMPLETE: SPAN(T("Complete"), _class = "req_status_complete"), } req_status = S3ReusableField("req_status", "integer", label = T("Request Status"), requires = IS_NULL_OR( IS_IN_SET(req_status_opts, zero = None)), represent = lambda opt: \ req_status_opts.get(opt, UNKNOWN_OPT), default = REQ_STATUS_NONE, writable = req_status_writable, ) req_ref = S3ReusableField("req_ref", "string", label = T("%(REQ)s Number") % # dict(REQ=settings.get_req_shortname()), writable = False, represent = self.req_ref_represent, ) req_priority_opts = {3: T("High"), 2: T("Medium"), 1: T("Low") } req_types_deployed = settings.get_req_req_type() req_type_opts = {} if settings.has_module("inv") and "Stock" in req_types_deployed: # Number hardcoded in controller & JS req_type_opts[1] = settings.get_req_type_inv_label() #if settings.has_module("asset") and "Asset" in req_types_deployed: # req_type_opts[2] = T("Assets") if settings.has_module("hrm") and "People" in req_types_deployed: req_type_opts[3] = settings.get_req_type_hrm_label() #if settings.has_module("cr") and "Shelter" in req_types_deployed: # req_type_opts[4] = T("Shelter") if "Other" in req_types_deployed: req_type_opts[9] = T("Other") use_commit = settings.get_req_use_commit() req_ask_security = settings.get_req_ask_security() req_ask_transport = settings.get_req_ask_transport() date_writable = settings.get_req_date_writable() requester_label = settings.get_req_requester_label() requester_is_author = settings.get_req_requester_is_author() if requester_is_author: site_default = auth.user.site_id if auth.is_logged_in() else None requester_default = auth.s3_logged_in_person() else: site_default = None requester_default = None # Dropdown or Autocomplete? 
if settings.get_org_site_autocomplete(): site_widget = S3SiteAutocompleteWidget() site_comment = S3AddResourceLink(c="org", f="facility", vars = dict(child="site_id"), title=T("Create Facility"), tooltip=AUTOCOMPLETE_HELP) else: site_widget = None site_comment = S3AddResourceLink(c="org", f="facility", vars = dict(child="site_id"), title=T("Create Facility")) # --------------------------------------------------------------------- # Requests tablename = "req_req" self.define_table(tablename, super_link("doc_id", "doc_entity"), # @ToDo: Replace with Link Table self.event_event_id( default=session.s3.event, readable = False, writable = False, ondelete="SET NULL"), Field("type", "integer", requires = IS_IN_SET(req_type_opts, zero=None), represent = lambda opt: \ req_type_opts.get(opt, UNKNOWN_OPT), label = T("Request Type")), req_ref(), s3_datetime(label = T("Date Requested"), default="now", past=8760, # Hours, so 1 year future=0, readable=date_writable, writable=date_writable, #represent="date", #widget="date", ), Field("priority", "integer", default = 2, label = T("Priority"), #@ToDo: Colour code the priority text - red, orange, green represent = lambda opt: \ req_priority_opts.get(opt, UNKNOWN_OPT), #represent = self.req_priority_represent, requires = IS_NULL_OR( IS_IN_SET(req_priority_opts)) ), # This is a component, so needs to be a super_link # - can't override field name, ondelete or requires super_link("site_id", "org_site", comment = site_comment, default = site_default, empty = False, filterby = "obsolete", filter_opts = [False], instance_types = auth.org_site_types, label = T("Requested For Facility"), readable = True, represent = self.org_site_represent, updateable = True, widget = site_widget, writable = True, ), #Field("location", # label = T("Neighborhood")), # Donations: What will the Items be used for?; People: Task Details s3_comments("purpose", comment = "", label = T("Purpose"), # Only-needed for summary mode (unused) #represent = 
self.req_purpose_represent, represent = s3_string_represent, ), Field("is_template", "boolean", label = T("Recurring Request?"), represent = s3_yes_no_represent, default = False, comment = DIV(_class="tooltip", _title="%s|%s" % (T("Recurring Request?"), T("If this is a request template to be added repeatedly then the schedule can be set on the next page."))), ), s3_datetime("date_required", label = T("Date Needed By"), past=1, # Allow time for people to fill out form future=8760, # Hours, so 1 year #represent="date", #widget="date", ), s3_datetime("date_required_until", label = T("Date Required Until"), past=0, future=8760, # Hours, so 1 year readable = False, writable = False ), person_id("requester_id", label = requester_label, empty = settings.get_req_requester_optional(), #writable = False, comment = S3AddResourceLink(c="pr", f="person", vars = dict(child="requester_id", parent="req"), title=crud_strings["pr_person"].label_create, tooltip=AUTOCOMPLETE_HELP), default = requester_default ), person_id("assigned_to_id", # This field should be in req_commit, but that complicates the UI readable = False, writable = False, label = T("Assigned To") ), person_id("approved_by_id", label = T("Approved By"), readable = False, writable = False, ), person_id("request_for_id", label = T("Requested For"), readable = False, writable = False, #default = auth.s3_logged_in_person() ), Field("transport_req", "boolean", represent = s3_yes_no_represent, readable = req_ask_transport, writable = req_ask_transport, label = T("Transportation Required")), Field("security_req", "boolean", represent = s3_yes_no_represent, readable = req_ask_security, writable = req_ask_security, label = T("Security Required")), s3_datetime("date_recv", label = T("Date Received"), # Could be T("Date Delivered") - make deployment_setting past=8760, # Hours, so 1 year future=0, readable = False, writable = False, ), person_id("recv_by_id", label = T("Received By"), # @ToDo: Set this in Update forms? 
Dedicated 'Receive' button? # (Definitely not in Create forms) #default = auth.s3_logged_in_person() ), # Simple Status # - currently just enabled in customize_req_fields() workflow req_status(readable = False, writable = False, ), # Detailed Status req_status("commit_status", readable = use_commit, writable = req_status_writable and use_commit, represent = self.req_commit_status_represent, label = T("Commit. Status")), req_status("transit_status", label = T("Transit Status")), req_status("fulfil_status", label = T("Fulfil. Status")), Field("closed", "boolean", label = T("Closed"), comment = DIV(_class="tooltip", _title="%s|%s" % (T("Closed"), T("No more items may be added to this request"))), default = False), Field("cancel", "boolean", label = T("Cancel"), default = False), Field.Method("details", req_req_details), Field.Method("drivers", req_req_drivers), s3_comments(comment=""), *s3_meta_fields()) # @todo: make lazy_table table = db[tablename] if len(req_type_opts) == 1: k, v = req_type_opts.items()[0] field = table.type field.default = k field.writable = False field.readable = False if not settings.get_req_use_req_number(): table.req_ref.readable = False table.req_ref.writable = False # CRUD strings ADD_REQUEST = T("Make Request") crud_strings[tablename] = Storage( label_create = ADD_REQUEST, title_display = T("Request Details"), title_list = T("Requests"), title_map=T("Map of Requests"), title_report = T("Requests Report"), title_update = T("Edit Request"), label_list_button = T("List Requests"), label_delete_button = T("Delete Request"), msg_record_created = T("Request Added"), msg_record_modified = T("Request Updated"), msg_record_deleted = T("Request Canceled"), msg_list_empty = T("No Requests")) # Which levels of Hierarchy are we using? 
hierarchy = current.gis.get_location_hierarchy() levels = hierarchy.keys() if len(settings.get_gis_countries()) == 1 or \ s3.gis.config.region_location_id: try: levels.remove("L0") except: pass filter_widgets = [ #S3TextFilter(["committer_id$first_name", # "committer_id$middle_name", # "committer_id$last_name", # "site_id$name", # "comments", # "req_id$name", # "organisation_id$name" # ], # label = T("Search") # comment=T("Search for a commitment by Committer name, Request ID, Site or Organization."), # ), S3OptionsFilter("transit_status", label = T("Transit Status"), options = req_status_opts, cols = 3, ), S3OptionsFilter("fulfil_status", label = T("Fulfill Status"), options = req_status_opts, cols = 3, ), S3LocationFilter("site_id$location_id", levels=levels, widget="multiselect", hidden=True, ), S3OptionsFilter("site_id", label=T("Requested For Facility"), widget="multiselect", hidden=True, ), S3OptionsFilter("created_by", label=T("Logged By"), widget="multiselect", hidden=True, ), S3DateFilter("date", label=T("Date"), hide_time=True, input_labels = {"ge": "From", "le": "To"}, comment=T("Search for requests made between these dates."), hidden=True, ), S3DateFilter("date_required", label=T("Date Needed By"), hide_time=True, input_labels = {"ge": "From", "le": "To"}, comment=T("Search for requests required between these dates."), hidden=True, ), ] if "Stock" in req_type_opts: filter_widgets.insert(4, S3OptionsFilter("item_category.name", label = T("Item Category"), widget = "multiselect", hidden=True, )) if len(req_type_opts) > 1: filter_widgets.insert(2, S3OptionsFilter("type", label=T("Type"), cols = len(req_type_opts), hidden=True, )) if use_commit: filter_widgets.insert(2, S3OptionsFilter("commit_status", label = T("Commit Status"), options = req_status_opts, cols = 3, )) report_fields = ["priority", "site_id$organisation_id", #"site_id$location_id$L1", #"site_id$location_id$L2", "site_id$location_id$L3", "site_id$location_id$L4", "site_id", ] # @ToDo: id gets 
stripped in _select_field fact_fields = report_fields + [(T("Requests"), "id")] # Reusable Field represent = self.req_represent req_id = S3ReusableField("req_id", "reference %s" % tablename, requires = IS_NULL_OR( IS_ONE_OF(db, "req_req.id", lambda id, row: represent(id, row, show_link=False), orderby="req_req.date", sort=True) ), represent = represent, sortby = "date", label = T("Request"), ondelete = "CASCADE", ) list_fields = ["id", "date", "date_required", "site_id", "requester_id", #"event_id", # @ToDo: Vary by deployment_setting (easy) # @ToDo: Allow a single column to support different components based on type # @ToDo: Include Qty too (Computed VF in component?) #(T("Items"), "item.item_id"), #(T("Skills"), "skill.skill_id"), ] if settings.get_req_use_req_number(): list_fields.insert(1, "req_ref") #if len(settings.get_req_req_type()) > 1: # list_fields.append("type") list_fields.append((T("Drivers"), "drivers")) list_fields.append("priority") # @ToDo: Deprecate with type-based components (see above) list_fields.append((T("Details"), "details")) if use_commit: list_fields.append("commit_status") list_fields.append("transit_status") list_fields.append("fulfil_status") list_fields.append((T("Committed By"), "commit.site_id")) self.configure(tablename, onaccept = self.req_onaccept, ondelete = self.req_req_ondelete, deduplicate = self.req_req_duplicate, listadd = False, orderby = "req_req.date desc", filter_widgets = filter_widgets, report_options = Storage( rows=report_fields, cols=report_fields, fact=fact_fields, methods=["count", "list", "sum"], defaults=Storage(rows="site_id$location_id$L4", cols="priority", fact="count(id)", totals=True, ) ), list_fields = list_fields, extra_fields = ["req_ref", "type"], context = {"event": "event_id", "location": "site_id$location_id", "organisation": "site_id$organisation_id", "site": "site_id", }, ) # Custom Methods set_method("req", "req", method="check", action=self.req_check) set_method("req", "req", 
method="commit_all", action=self.req_commit_all) set_method("req", "req", method="copy_all", action=self.req_copy_all) # Print Forms set_method("req", "req", method="form", action=self.req_form) # Components add_components(tablename, # Documents req_document="req_id", # Requested Items req_req_item={"joinby": "req_id", "multiple": multiple_req_items, }, # Requested Skills req_req_skill={"joinby": "req_id", "multiple": multiple_req_items, }, # Commitment req_commit="req_id", # Item Categories supply_item_category={"link": "req_req_item_category", "joinby": "req_id", "key": "item_category_id", }, **{# Scheduler Jobs (for recurring requests) S3Task.TASK_TABLENAME: {"name": "job", "joinby": "req_id", "link": "req_job", "key": "scheduler_task_id", "actuate": "replace", }, } ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(req_create_form_mods = self.req_create_form_mods, req_hide_quantities = self.req_hide_quantities, req_inline_form = self.req_inline_form, req_prep = self.req_prep, req_priority_opts = req_priority_opts, req_priority_represent = self.req_priority_represent, req_req_id = req_id, req_req_ref = req_ref, req_status_opts = req_status_opts, req_type_opts = req_type_opts, req_tabs = self.req_tabs, ) # ------------------------------------------------------------------------- def defaults(self): """ Safe defaults for model-global names in case module is disabled """ req_ref = S3ReusableField("req_ref", "string", readable=False, writable=False) return dict(req_req_ref = req_ref ) # ------------------------------------------------------------------------- @staticmethod def req_create_form_mods(): """ Function to be called from REST prep functions - main module & components (sites & events) """ T = current.T db = current.db s3 = current.response.s3 settings = current.deployment_settings # Hide fields which don't make sense in a Create form table = db.req_req table.req_ref.readable = 
False table.commit_status.readable = table.commit_status.writable = False table.transit_status.readable = table.transit_status.writable = False table.fulfil_status.readable = table.fulfil_status.writable = False table.cancel.readable = table.cancel.writable = False table.date_recv.readable = table.date_recv.writable = False table.recv_by_id.readable = table.recv_by_id.writable = False if settings.get_req_requester_from_site(): # Filter the list of Contacts to those for the site table.requester_id.widget = None s3.jquery_ready.append(''' S3OptionsFilter({ 'triggerName':'site_id', 'targetName':'requester_id', 'lookupResource':'staff', 'lookupURL':S3.Ap.concat('/hrm/staff_for_site/'), 'msgNoRecords':'%s', 'optional':true, })''' % T("No contacts yet defined for this site")) #table.site_id.comment = A(T("Set as default Site"), # _id="req_req_site_id_link", # _target="_blank", # _href=URL(c="default", # f="user", # args=["profile"])) req_types = settings.get_req_req_type() if "People" in req_types: # Show the Required Until Field # (gets turned-off by JS for other types) table.date_required_until.writable = True if "type" not in current.request.vars: # Script to inject into Pages which include Request create forms req_helptext = ''' i18n.req_purpose="%s" i18n.req_site_id="%s" i18n.req_request_for_id="%s" i18n.req_recv_by_id="%s" i18n.req_items_purpose="%s" i18n.req_items_site_id="%s" i18n.req_items_recv_by_id="%s" i18n.req_people_purpose="%s" i18n.req_people_site_id="%s" i18n.req_people_recv_by_id="%s" i18n.req_next_msg="%s" i18n.req_other_msg="%s" i18n.req_details_mandatory="%s"''' % (table.purpose.label, table.site_id.label, table.request_for_id.label, table.recv_by_id.label, T("What the Items will be used for"), T("Deliver To"), T("Delivered To"), T("Task Details"), T("Report To"), T("Reported To"), T("Please enter the details on the next screen."), T("Please enter request details here."), T("Details field is required!")) s3.js_global.append(req_helptext) 
s3.scripts.append("/%s/static/scripts/S3/s3.req_create_variable.js" % current.request.application) else: s3.scripts.append("/%s/static/scripts/S3/s3.req_create.js" % current.request.application) return # ------------------------------------------------------------------------- @staticmethod def req_inline_form(type, method): """ Function to be called from REST prep functions - to add req_item & req_skill components as inline forms """ T = current.T s3db = current.s3db table = s3db.req_req s3 = current.response.s3 postprocess = s3.req_req_postprocess if type == 1: # Dropdown not Autocomplete itable = s3db.req_req_item itable.item_id.widget = None jquery_ready = s3.jquery_ready jquery_ready.append(''' S3OptionsFilter({ 'triggerName':'item_id', 'targetName':'item_pack_id', 'lookupPrefix':'supply', 'lookupResource':'item_pack', 'msgNoRecords':i18n.no_packs, 'fncPrep':S3.supply.fncPrepItem, 'fncRepresent':S3.supply.fncRepresentItem })''') # Custom Form settings = current.deployment_settings fields = ["site_id", "requester_id", "date", "priority", "date_required", S3SQLInlineComponent( "req_item", label = T("Items"), fields = ["item_id", "item_pack_id", "quantity", "comments" ] ), "comments", ] if method == "update": if settings.get_req_status_writable(): fields.insert(7, "fulfil_status") if settings.get_req_show_quantity_transit(): fields.insert(7, "transit_status") if settings.get_req_use_commit(): fields.insert(7, "commit_status") fields.insert(7, "date_recv") if settings.get_req_requester_from_site(): # Filter the list of Contacts to those for the site table.requester_id.widget = None jquery_ready.append(''' S3OptionsFilter({ 'triggerName':'site_id', 'targetName':'requester_id', 'lookupResource':'staff', 'lookupURL':S3.Ap.concat('/hrm/staff_for_site/'), 'msgNoRecords':'%s', 'optional':true, })''' % T("No contacts yet defined for this site")) table.site_id.comment = A(T("Set as default Site"), _id="req_req_site_id_link", _target="_blank", _href=URL(c="default", 
f="user", args=["profile"])) if settings.get_req_items_ask_purpose(): fields.insert(6, "purpose") if method != "update": fields.insert(1, "is_template") if settings.get_req_use_req_number() and \ not settings.get_req_generate_req_number(): fields.insert(0, "req_ref") if postprocess: crud_form = S3SQLCustomForm(*fields, postprocess=postprocess) else: crud_form = S3SQLCustomForm(*fields) s3db.configure("req_req", crud_form=crud_form) elif type == 3: # Custom Form stable = s3db.req_req_skill stable.skill_id.label = T("Required Skills (optional)") # Custom Form settings = current.deployment_settings fields = ["site_id", "requester_id", "date", "priority", "date_required", "date_required_until", "purpose", S3SQLInlineComponent( "req_skill", label = T("Skills"), fields = ["quantity", "skill_id", "comments" ] ), "comments", ] if method == "update": if settings.get_req_status_writable(): fields.insert(8, "fulfil_status") if settings.get_req_show_quantity_transit(): fields.insert(8, "transit_status") if settings.get_req_use_commit(): fields.insert(8, "commit_status") fields.insert(8, "date_recv") if settings.get_req_requester_from_site(): # Filter the list of Contacts to those for the site table.requester_id.widget = None s3.jquery_ready.append(''' S3OptionsFilter({ 'triggerName':'site_id', 'targetName':'requester_id', 'lookupResource':'staff', 'lookupURL':S3.Ap.concat('/hrm/staff_for_site/'), 'msgNoRecords':'%s', 'optional':true, })''' % T("No contacts yet defined for this site")) table.site_id.comment = A(T("Set as default Site"), _id="req_req_site_id_link", _target="_blank", _href=URL(c="default", f="user", args=["profile"])) else: fields.insert(1, "is_template") if settings.get_req_use_req_number() and \ not settings.get_req_generate_req_number(): fields.insert(0, "req_ref") if postprocess: crud_form = S3SQLCustomForm(*fields, postprocess=postprocess) else: crud_form = S3SQLCustomForm(*fields) s3db.configure("req_req", crud_form=crud_form) # 
------------------------------------------------------------------------- @staticmethod def req_prep(r): """ Function to be called from REST prep functions - main module & components (sites) """ if not r.component or r.component.name =="req": default_type = current.db.req_req.type.default if default_type: T = current.T req_submit_button = {1:T("Save and add Items"), 3:T("Save and add People")} current.response.s3.crud.submit_button = req_submit_button[default_type] return # ------------------------------------------------------------------------- @staticmethod def req_represent(id, row=None, show_link=True): """ Represent a Request """ if row: table = current.db.req_req elif not id: return current.messages["NONE"] else: id = int(id) if id: db = current.db table = db.req_req row = db(table.id == id).select(table.date, table.req_ref, table.site_id, limitby=(0, 1)).first() try: if row.req_ref: req = row.req_ref else: req = "%s - %s" % (table.site_id.represent(row.site_id, show_link=False), table.date.represent(row.date)) except: return current.messages.UNKNOWN_OPT if show_link: return A(req, _href = URL(c = "req", f = "req", args = [id]), _title = current.T("Go to Request")) else: return req # ------------------------------------------------------------------------- @staticmethod def req_commit_status_represent(opt): """ Represet the Commitment Status of the Request """ if opt == REQ_STATUS_COMPLETE: # Include the Site Name of the Committer if we can # @ToDo: figure out how! 
return SPAN(current.T("Complete"), _class = "req_status_complete") else: return current.s3db.req_status_opts.get(opt, current.messages.UNKNOWN_OPT) # ------------------------------------------------------------------------- @staticmethod def req_ref_represent(value, show_link=True, pdf=False): """ Represent for the Request Reference if show_link is True then it will generate a link to the record if pdf is True then it will generate a link to the PDF """ if value: if show_link: db = current.db table = db.req_req req_row = db(table.req_ref == value).select(table.id, limitby=(0, 1) ).first() if req_row: if pdf: args = [req_row.id, "form"] else: args = [req_row.id] return A(value, _href = URL(c = "req", f = "req", args = args ), ) return B(value) return current.messages["NONE"] # ------------------------------------------------------------------------- @staticmethod def req_form(r, **attr): """ Generate a PDF of a Request Form """ db = current.db table = db.req_req record = db(table.id == r.id).select(limitby=(0, 1)).first() if record.type == 1: pdf_componentname = "req_item" list_fields = ["item_id", "item_pack_id", "quantity", "quantity_commit", "quantity_transit", "quantity_fulfil", ] elif record.type == 3: pdf_componentname = "req_skill" list_fields = ["skill_id", "quantity", "quantity_commit", "quantity_transit", "quantity_fulfil", ] else: # Not Supported - redirect to normal PDF redirect(URL(args=current.request.args[0], extension="pdf")) if current.deployment_settings.get_req_use_req_number(): filename = record.req_ref else: filename = None exporter = S3Exporter().pdf return exporter(r.resource, request=r, method = "list", pdf_title = current.deployment_settings.get_req_form_name(), pdf_filename = filename, list_fields = list_fields, pdf_hide_comments = True, pdf_componentname = pdf_componentname, pdf_header_padding = 12, #pdf_footer = inv_recv_pdf_footer, pdf_table_autogrow = "B", pdf_paper_alignment = "Landscape", **attr ) # 
------------------------------------------------------------------------- @staticmethod def req_copy_all(r, **attr): """ Custom Method to copy an existing Request - creates a req with req_item records """ db = current.db s3db = current.s3db table = s3db.req_req settings = current.deployment_settings now = current.request.now record = r.record req_id = record.id # Make a copy of the request record if settings.get_req_use_req_number(): code = s3db.supply_get_shipping_code(settings.get_req_shortname(), record.site_id, table.req_ref, ) else: code = None if record.date_required and record.date_required < now: date_required = now + datetime.timedelta(days=14) else: date_required = record.date_required new_req_id = table.insert(type = record.type, req_ref = code, date = now, date_required = date_required, priority = record.priority, site_id = record.site_id, purpose = record.purpose, requester_id = record.requester_id, transport_req = record.transport_req, security_req = record.security_req, comments = record.comments ) # Make a copy of each child record if record.type == 1: # Items ritable = s3db.req_req_item items = db(ritable.req_id == req_id).select(ritable.id, ritable.item_entity_id, ritable.item_id, ritable.item_pack_id, ritable.quantity, ritable.pack_value, ritable.currency, ritable.site_id, ritable.comments) if items: insert = ritable.insert for item in items: insert(req_id=new_req_id, item_entity_id = item.item_entity_id, item_id = item.item_id, item_pack_id = item.item_pack_id, quantity = item.quantity, pack_value = item.pack_value, currency = item.currency, site_id = item.site_id, comments = item.comments) elif record.type == 3: # People and skills rstable = s3db.req_req_skill skills = db(rstable.req_id == req_id).select(rstable.id, rstable.skill_id, rstable.quantity, rstable.site_id, rstable.comments) if skills: insert = rstable.insert for skill in skills: insert(req_id = new_req_id, skill_id = skill.skill_id, quantity = skill.quantity, site_id = 
skill.site_id, comments = skill.comments) redirect(URL(f="req", args=[new_req_id, "update"])) # ------------------------------------------------------------------------- @staticmethod def req_commit_all(r, **attr): """ Custom Method to commit to a Request - creates a commit with commit_items for each req_item """ T = current.T db = current.db s3db = current.s3db table = s3db.req_commit record = r.record req_id = record.id # Check if there is an existing Commitment query = (table.req_id == req_id) & \ (table.deleted == False) exists = db(query).select(table.id, limitby=(0, 1)) if exists: # Browse existing commitments redirect(URL(f="req", args=[r.id, "commit"])) type = record.type # Create the commitment cid = table.insert(req_id=req_id, type=type) if type == 1: # Items ritable = s3db.req_req_item items = db(ritable.req_id == req_id).select(ritable.id, ritable.item_pack_id, ritable.quantity, ritable.comments) if items: citable = s3db.req_commit_item insert = citable.insert for item in items: id = item.id quantity = item.quantity insert(commit_id=cid, req_item_id=id, item_pack_id=item.item_pack_id, quantity=quantity, comments=item.comments) # Mark Item in the Request as Committed db(ritable.id == item.id).update(quantity_commit=quantity) # Mark Request as Committed db(s3db.req_req.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) msg = T("You have committed to all items in this Request. 
Please check that all details are correct and update as-required.") elif type == 3: # People rstable = s3db.req_req_skill skills = db(rstable.req_id == req_id).select(rstable.id, rstable.skill_id, rstable.quantity, rstable.comments) if skills: cstable = s3db.req_commit_skill insert = cstable.insert for skill in skills: id = skill.id quantity = skill.quantity insert(commit_id=cid, skill_id=skill.skill_id, quantity=quantity, comments=skill.comments) # Mark Item in the Request as Committed db(rstable.id == skill.id).update(quantity_commit=quantity) # Mark Request as Committed db(s3db.req_req.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) msg = T("You have committed for all people in this Request. Please check that all details are correct and update as-required.") else: # Other # Mark Request as Committed db(s3db.req_req.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) msg = T("You have committed to this Request. Please check that all details are correct and update as-required.") if "send" in r.args: redirect(URL(f="send_commit", args=[cid])) current.session.confirmation = msg redirect(URL(c="req", f="commit", args=[cid])) # ------------------------------------------------------------------------- @staticmethod def req_priority_represent(id): """ """ src = URL(c="static", f="img", args=["priority", "priority_%d.gif" % (id or 4)] ) return DIV(IMG(_src= src)) # ------------------------------------------------------------------------- @staticmethod def req_hide_quantities(table): """ Hide the Update Quantity Status Fields from Request create forms """ if not current.deployment_settings.get_req_item_quantities_writable(): table.quantity_commit.writable = table.quantity_commit.readable = False table.quantity_transit.writable = table.quantity_transit.readable= False table.quantity_fulfil.writable = table.quantity_fulfil.readable = False # ------------------------------------------------------------------------- @staticmethod def req_tabs(r): """ Add a set of 
Tabs for a Site's Request Tasks @ToDo: Roll these up like inv_tabs in inv.py """ settings = current.deployment_settings if settings.get_org_site_inv_req_tabs(): s3_has_permission = current.auth.s3_has_permission if settings.has_module("req") and \ s3_has_permission("read", "req_req", c="req"): T = current.T tabs = [(T("Requests"), "req")] if s3_has_permission("read", "req_req", c=current.request.controller, f="req_match"): tabs.append((T("Match Requests"), "req_match/")) if settings.get_req_use_commit(): tabs.append((T("Commit"), "commit")) return tabs return [] # ------------------------------------------------------------------------- @staticmethod def req_check(r, **attr): """ Check to see if your Inventory can be used to match any open Requests """ T = current.T db = current.db s3db = current.s3db response = current.response s3 = response.s3 NONE = current.messages["NONE"] site_id = r.vars.site_id site_name = s3db.org_site_represent(site_id, show_link=False) output = {} output["title"] = T("Check Request") output["rheader"] = req_rheader(r, check_page=True) stable = s3db.org_site ltable = s3db.gis_location query = (stable.id == site_id ) & \ (stable.location_id == ltable.id) location_r = db(query).select(ltable.lat, ltable.lon, limitby=(0, 1)).first() query = (stable.id == r.record.site_id ) & \ (stable.location_id == ltable.id) req_location_r = db(query).select(ltable.lat, ltable.lon, limitby=(0, 1)).first() try: distance = current.gis.greatCircleDistance(location_r.lat, location_r.lon, req_location_r.lat, req_location_r.lon,) output["rheader"][0].append(TR(TH(T("Distance from %s:") % site_name), TD(T("%.1f km") % distance) )) except: pass output["subtitle"] = T("Request Items") use_commit = current.deployment_settings.get_req_use_commit() # Get req_items & inv_items from this site table = s3db.req_req_item query = (table.req_id == r.id ) & \ (table.deleted == False ) req_items = db(query).select(table.id, table.item_id, table.quantity, table.item_pack_id, 
table.quantity_commit, table.quantity_transit, table.quantity_fulfil) itable = s3db.inv_inv_item query = (itable.site_id == site_id ) & \ (itable.deleted == False ) inv_items_dict = {} inv_items = db(query).select(itable.item_id, itable.quantity, itable.item_pack_id, # VF #itable.pack_quantity, ) for item in inv_items: item_id = item.item_id if item_id in inv_items_dict: inv_items_dict[item_id] += item.quantity * item.pack_quantity() else: inv_items_dict[item_id] = item.quantity * item.pack_quantity() if len(req_items): row = TR(TH(table.item_id.label), TH(table.quantity.label), TH(table.item_pack_id.label), TH(table.quantity_transit.label), TH(table.quantity_fulfil.label), TH(T("Quantity in %s's Warehouse") % site_name), TH(T("Match?")) ) if use_commit: row.insert(3, TH(table.quantity_commit.label)) items = TABLE(THEAD(row), _id = "list", _class = "dataTable display") supply_item_represent = table.item_id.represent item_pack_represent = table.item_pack_id.represent no_match = True for req_item in req_items: # Convert inv item quantity to req item quantity item_id = req_item.item_id if item_id in inv_items_dict: inv_quantity = inv_items_dict[item_id] / req_item.pack_quantity() else: inv_quantity = NONE if inv_quantity != NONE: no_match = False if inv_quantity < req_item.quantity: status = SPAN(T("Partial"), _class = "req_status_partial") else: status = SPAN(T("YES"), _class = "req_status_complete") else: status = SPAN(T("NO"), _class = "req_status_none"), if use_commit: items.append(TR(#A(req_item.id), supply_item_represent(req_item.item_id), req_item.quantity, item_pack_represent(req_item.item_pack_id), # This requires an action btn to get the req_id req_item.quantity_commit, req_item.quantity_transit, req_item.quantity_fulfil, #req_quantity_represent(req_item.quantity_commit, "commit"), #req_quantity_represent(req_item.quantity_fulfil, "fulfil"), #req_quantity_represent(req_item.quantity_transit, "transit"), inv_quantity, status, ) ) else: 
items.append(TR(#A(req_item.id), supply_item_represent(req_item.item_id), req_item.quantity, item_pack_represent(req_item.item_pack_id), # This requires an action btn to get the req_id req_item.quantity_transit, req_item.quantity_fulfil, #req_quantity_represent(req_item.quantity_fulfil, "fulfil"), #req_quantity_represent(req_item.quantity_transit, "transit"), inv_quantity, status, ) ) output["items"] = items #s3.actions = [req_item_inv_item_btn] s3.no_sspag = True # pag won't work if no_match: current.response.warning = \ T("%(site)s has no items exactly matching this request. There may still be other items in stock which can fulfill this request!") % \ dict(site=site_name) else: output["items"] = s3.crud_strings.req_req_item.msg_list_empty response.view = "list.html" s3.no_formats = True return output # ------------------------------------------------------------------------- @staticmethod def req_onaccept(form): """ After DB I/O """ db = current.db s3db = current.s3db request = current.request settings = current.deployment_settings tablename = "req_req" table = s3db.req_req form_vars = form.vars id = form_vars.id if form_vars.get("is_template", None): is_template = True f = "req_template" else: is_template = False f = "req" # If the req_ref is None then set it up if settings.get_req_use_req_number(): record = db(table.id == id).select(table.req_ref, table.site_id, limitby=(0, 1)).first() if not record.req_ref: code = s3db.supply_get_shipping_code(settings.get_req_shortname(), record.site_id, table.req_ref, ) db(table.id == id).update(req_ref = code) req_status = form_vars.get("req_status", None) if req_status is not None: # Translate Simple Status req_status = int(req_status) if req_status == REQ_STATUS_PARTIAL: # read current status record = db(table.id == id).select(table.commit_status, table.fulfil_status, limitby=(0, 1) ).first() data = dict(cancel = False) if record.commit_status != REQ_STATUS_COMPLETE: data["commit_status"] = REQ_STATUS_PARTIAL if 
record.fulfil_status == REQ_STATUS_COMPLETE: data["fulfil_status"] = REQ_STATUS_PARTIAL db(table.id == id).update(**data) elif req_status == REQ_STATUS_COMPLETE: db(table.id == id).update(fulfil_status = REQ_STATUS_COMPLETE, cancel = False, ) elif req_status == REQ_STATUS_CANCEL: db(table.id == id).update(cancel = True) elif req_status == REQ_STATUS_NONE: db(table.id == id).update(commit_status = REQ_STATUS_NONE, fulfil_status = REQ_STATUS_NONE, cancel = False) if settings.get_req_requester_to_site(): requester_id = form_vars.get("requester_id", None) if requester_id: site_id = form_vars.get("site_id", None) # If the requester has no HR record, then create one hrtable = s3db.hrm_human_resource query = (hrtable.person_id == requester_id) exists = db(query).select(hrtable.id, hrtable.organisation_id, hrtable.site_id, hrtable.site_contact, limitby=(0, 1) ).first() if exists: if site_id and not exists.site_id: # Check that the Request site belongs to this Org stable = s3db.org_site site = db(stable.site_id == site_id).select(stable.organisation_id, limitby=(0, 1) ).first() # @ToDo: Think about branches if site and site.organisation_id == exists.organisation_id: # Set the HR record as being for this site exists.update(site_id = site_id) s3db.hrm_human_resource_onaccept(exists) elif site_id: # Lookup the Org for the site stable = s3db.org_site site = db(stable.site_id == site_id).select(stable.organisation_id, limitby=(0, 1) ).first() # Is there already a site_contact for this site? 
ltable = s3db.hrm_human_resource_site query = (ltable.site_id == site_id) & \ (ltable.site_contact == True) already = db(query).select(ltable.id, limitby=(0, 1) ).first() if already: site_contact = False else: site_contact = True hr_id = hrtable.insert(person_id = requester_id, organisation_id = site.organisation_id, site_id = site_id, site_contact = site_contact, ) s3db.hrm_human_resource_onaccept(Storage(id=hr_id)) # Configure the next page to go to based on the request type if is_template: s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "job"]), update_next = URL(c="req", f=f, args=["[id]", "job"])) elif not settings.get_req_inline_forms(): if table.type.default: type = table.type.default elif "type" in form_vars: type = int(form_vars.type) else: type = 1 if type == 1 and settings.has_module("inv"): s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "req_item"]), update_next = URL(c="req", f=f, args=["[id]", "req_item"])) elif type == 2 and settings.has_module("asset"): s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "req_asset"]), update_next = URL(c="req", f=f, args=["[id]", "req_asset"])) elif type == 3 and settings.has_module("hrm"): s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "req_skill"]), update_next = URL(c="req", f=f, args=["[id]", "req_skill"])) elif type == 4 and settings.has_module("cr"): s3db.configure(tablename, create_next = URL(c="req", f=f, args=["[id]", "req_shelter"]), update_next = URL(c="req", f=f, args=["[id]", "req_shelter"])) # ------------------------------------------------------------------------- @staticmethod def req_req_ondelete(row): """ Cleanup any scheduled tasks """ db = current.db table = db.scheduler_task query = (table.function_name == "req_add_from_template") & \ (table.args == "[%s]" % row.id) db(query).delete() # ------------------------------------------------------------------------- @staticmethod def req_req_duplicate(job): 
""" This callback will be called when importing records it will look to see if the record being imported is a duplicate. @param job: An S3ImportJob object which includes all the details of the record being imported If the record is a duplicate then it will set the job method to update Rules for finding a duplicate: - If the Request Number exists then it's a duplicate """ if job.tablename == "req_req": table = job.table if "req_ref" in job.data: request_number = job.data.req_ref else: return query = (table.req_ref == request_number) _duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() if _duplicate: job.id = _duplicate.id job.data.id = _duplicate.id job.method = job.METHOD.UPDATE # ============================================================================= class S3RequestItemModel(S3Model): """ """ names = ["req_req_item", "req_item_id", "req_item_represent", "req_req_item_category", ] def model(self): T = current.T db = current.db settings = current.deployment_settings quantities_writable = settings.get_req_item_quantities_writable() use_commit = settings.get_req_use_commit() show_qty_transit = settings.get_req_show_quantity_transit() track_pack_values = settings.get_inv_track_pack_values() define_table = self.define_table req_id = self.req_req_id # ----------------------------------------------------------------- # Request Items # tablename = "req_req_item" define_table(tablename, req_id(empty=False), self.supply_item_entity_id, self.supply_item_id(), self.supply_item_pack_id(), Field("quantity", "double", notnull=True, requires = IS_FLOAT_IN_RANGE(minimum=1), represent=lambda v: \ IS_FLOAT_AMOUNT.represent(v, precision=2)), Field("pack_value", "double", readable=track_pack_values, writable=track_pack_values, label = T("Estimated Value per Pack")), # @ToDo: Move this into a Currency Widget for the pack_value field s3_currency(readable=track_pack_values, writable=track_pack_values), self.org_site_id, Field("quantity_commit", "double", label = 
T("Quantity Committed"), represent = self.req_qnty_commit_represent, default = 0, requires = IS_FLOAT_IN_RANGE(minimum=0, maximum=999999), readable = use_commit, writable = use_commit and quantities_writable), Field("quantity_transit", "double", label = T("Quantity in Transit"), represent = self.req_qnty_transit_represent, default = 0, requires = IS_FLOAT_IN_RANGE(minimum=0, maximum=999999), readable = show_qty_transit, writable = show_qty_transit and quantities_writable), Field("quantity_fulfil", "double", label = T("Quantity Fulfilled"), represent = self.req_qnty_fulfil_represent, default = 0, requires = IS_FLOAT_IN_RANGE(minimum=0, maximum=999999), writable = quantities_writable), Field.Method("pack_quantity", self.supply_item_pack_quantity(tablename=tablename)), s3_comments(), *s3_meta_fields()) # @todo: make lazy_table table = db[tablename] table.site_id.label = T("Requested From") # CRUD strings ADD_REQUEST_ITEM = T("Add Item to Request") current.response.s3.crud_strings[tablename] = Storage( label_create = ADD_REQUEST_ITEM, title_display = T("Request Item Details"), title_list = T("Items in Request"), title_update = T("Edit Item in Request"), label_list_button = T("List Items in Request"), label_delete_button = T("Delete Item from Request"), msg_record_created = T("Item(s) added to Request"), msg_record_modified = T("Item(s) updated on Request"), msg_record_deleted = T("Item(s) deleted from Request"), msg_list_empty = T("No Items currently requested")) # Reusable Field req_item_id = S3ReusableField("req_item_id", "reference %s" % tablename, requires = IS_NULL_OR( IS_ONE_OF(db, "req_req_item.id", self.req_item_represent, orderby="req_req_item.id", sort=True)), represent = self.req_item_represent, label = T("Request Item"), comment = DIV(_class="tooltip", _title="%s|%s" % (T("Request Item"), T("Select Items from the Request"))), ondelete = "CASCADE", script = ''' S3OptionsFilter({ 'triggerName':'req_item_id', 'targetName':'item_pack_id', 
'lookupResource':'item_pack', 'lookupPrefix':'supply', 'lookupURL':S3.Ap.concat('/req/req_item_packs/'), 'msgNoRecords':i18n.no_packs, 'fncPrep':S3.supply.fncPrepItem, 'fncRepresent':S3.supply.fncRepresentItem })''') if settings.get_req_prompt_match(): # Shows the inventory items which match a requested item # @ToDo: Make this page a component of req_item create_next = URL(c="req", f="req_item_inv_item", args=["[id]"]) else: create_next = None list_fields = ["id", "item_id", "item_pack_id", ] if settings.get_req_prompt_match(): list_fields.append("site_id") list_fields.append("quantity") if use_commit: list_fields.append("quantity_commit") if show_qty_transit: list_fields.append("quantity_transit") list_fields.append("quantity_fulfil") list_fields.append("comments") filter_widgets = [ S3OptionsFilter("req_id$fulfil_status", label=T("Status"), options = self.req_status_opts, cols = 3, ), S3OptionsFilter("req_id$priority", label=T("Priority"), options = self.req_priority_opts, cols = 3, ), S3LocationFilter("req_id$site_id$location_id", levels = [#"L1", #"L2", "L3", "L4", ], widget = "multiselect", ), ] self.configure(tablename, super_entity = "supply_item_entity", onaccept = req_item_onaccept, ondelete = req_item_ondelete, create_next = create_next, deletable = settings.get_req_multiple_req_items(), deduplicate = self.req_item_duplicate, list_fields = list_fields, filter_widgets = filter_widgets, extra_fields = ["item_pack_id"], ) # --------------------------------------------------------------------- # # Req <> Item Category link table # # - used to provide a search filter # - populated onaccept/ondelete of req_item # tablename = "req_req_item_category" define_table(tablename, req_id(empty=False), self.supply_item_category_id(), *s3_meta_fields() ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(req_item_id = req_item_id, req_item_represent = self.req_item_represent, ) # 
------------------------------------------------------------------------- def defaults(self): """ Safe defaults for model-global names in case module is disabled """ req_item_id = S3ReusableField("req_item_id", "integer", readable=False, writable=False) return dict(req_item_id = req_item_id ) # ------------------------------------------------------------------------- @staticmethod def req_item_represent(id, row=None): """ Represent a Request Item """ if row: # @ToDo: Optimised query where we don't need to do the join id = row.id elif not id: return current.messages["NONE"] db = current.db ritable = db.req_req_item sitable = db.supply_item query = (ritable.id == id) & \ (ritable.item_id == sitable.id) record = db(query).select(sitable.name, limitby = (0, 1)).first() if record: return record.name else: return None # --------------------------------------------------------------------- @staticmethod def req_qnty_commit_represent(quantity, show_link=True): """ call the generic quantity represent """ return S3RequestItemModel.req_quantity_represent(quantity, "commit", show_link) # --------------------------------------------------------------------- @staticmethod def req_qnty_transit_represent(quantity, show_link=True): """ call the generic quantity represent """ return S3RequestItemModel.req_quantity_represent(quantity, "transit", show_link) # --------------------------------------------------------------------- @staticmethod def req_qnty_fulfil_represent(quantity, show_link=True): """ call the generic quantity represent """ return S3RequestItemModel.req_quantity_represent(quantity, "fulfil", show_link) # --------------------------------------------------------------------- @staticmethod def req_quantity_represent(quantity, type, show_link=True): """ @ToDo: There should be better control of this feature - currently this only works with req_items which are being matched by commit / send / recv """ if quantity and show_link and \ not 
current.deployment_settings.get_req_item_quantities_writable(): return TAG[""](quantity, A(DIV(_class = "quantity %s ajax_more collapsed" % type ), _href = "#", ) ) else: return quantity # ------------------------------------------------------------------------- @staticmethod def req_item_delete(row): """ Update the """ h # ------------------------------------------------------------------------- @staticmethod def req_item_duplicate(job): """ This callback will be called when importing records it will look to see if the record being imported is a duplicate. @param job: An S3ImportJob object which includes all the details of the record being imported If the record is a duplicate then it will set the job method to update Rules for finding a duplicate: - If the Request Number matches - The item is the same """ if job.tablename == "req_req_item": itable = job.table s3db = current.s3db rtable = s3db.req_req stable = s3db.supply_item req_id = None item_id = None for ref in job.references: if ref.entry.tablename == "req_req": if ref.entry.id != None: req_id = ref.entry.id else: uuid = ref.entry.item_id jobitem = job.job.items[uuid] req_id = jobitem.id elif ref.entry.tablename == "supply_item": if ref.entry.id != None: item_id = ref.entry.id else: uuid = ref.entry.item_id jobitem = job.job.items[uuid] item_id = jobitem.id if req_id != None and item_id != None: query = (itable.req_id == req_id) & \ (itable.item_id == item_id) else: return _duplicate = current.db(query).select(itable.id, limitby=(0, 1)).first() if _duplicate: job.id = _duplicate.id job.data.id = _duplicate.id job.method = job.METHOD.UPDATE # ============================================================================= class S3RequestSkillModel(S3Model): """ """ names = ["req_req_skill", "req_skill_represent", ] def model(self): T = current.T settings = current.deployment_settings quantities_writable = settings.get_req_skill_quantities_writable() use_commit = settings.get_req_use_commit() define_table = 
self.define_table # ----------------------------------------------------------------- # Request Skills # tablename = "req_req_skill" define_table(tablename, self.req_req_id(empty=False), # Make this a Component #Field("task", # readable=False, # writable=False, # Populated from req_req 'Purpose' # label = T("Task Details")), self.hrm_multi_skill_id( label = T("Required Skills"), comment = T("Leave blank to request an unskilled person"), represent = lambda id: \ id and S3Represent(lookup="hrm_skill", multiple=True)(id) or \ T("No Skills Required"), ), # @ToDo: Add a minimum competency rating? Field("quantity", "integer", notnull=True, default = 1, requires = IS_INT_IN_RANGE(1, 999999), label = T("Number of People Required"), ), self.org_site_id, Field("quantity_commit", "integer", label = T("Quantity Committed"), default = 0, requires = IS_INT_IN_RANGE(1, 999999), readable = use_commit, writable = use_commit and quantities_writable), Field("quantity_transit", "integer", label = T("Quantity in Transit"), #represent = lambda quantity_transit: \ # req_quantity_represent(quantity_transit, # "transit"), default = 0, requires = IS_INT_IN_RANGE(1, 999999), writable = quantities_writable), Field("quantity_fulfil", "integer", label = T("Quantity Fulfilled"), default = 0, requires = IS_INT_IN_RANGE(1, 999999), writable = quantities_writable), s3_comments( #label = T("Task Details"), #comment = DIV(_class="tooltip", # _title="%s|%s" % (T("Task Details"), # T("Include any special requirements such as equipment which they need to bring."))) ), *s3_meta_fields()) # @todo: make lazy_table table = current.db[tablename] table.site_id.label = T("Requested From") if not settings.get_req_show_quantity_transit(): table.quantity_transit.writable = table.quantity_transit.readable= False # CRUD strings ADD_REQUEST_SKILL = T("Add Skill to Request") current.response.s3.crud_strings[tablename] = Storage( label_create = ADD_REQUEST_SKILL, title_display = T("Requested Skill Details"), 
title_list = T("Requested Skills"), title_update = T("Edit Requested Skill"), label_list_button = T("List Requested Skills"), label_delete_button = T("Remove Skill from Request"), msg_record_created = T("Skill added to Request"), msg_record_modified = T("Requested Skill updated"), msg_record_deleted = T("Skill removed from Request"), msg_list_empty = T("No Skills currently requested")) list_fields = ["id", "skill_id", # @ToDo: Activate based on a deployment_setting #"task", "quantity", "quantity_transit", "quantity_fulfil", "comments", ] if use_commit: list_fields.insert(3, "quantity_commit") # Filter Widgets filter_widgets = [ S3OptionsFilter("req_id$fulfil_status", label=T("Status"), options = self.req_status_opts, cols = 3, ), S3OptionsFilter("req_id$priority", label=T("Priority"), options = self.req_priority_opts, cols = 3, ), S3LocationFilter("req_id$site_id$location_id", levels = [#"L1", #"L2", "L3", "L4", ], widget = "multiselect", ), ] # Configuration self.configure(tablename, onaccept=req_skill_onaccept, # @ToDo: Produce a custom controller like req_item_inv_item? 
#create_next = URL(c="req", f="req_skill_skill", # args=["[id]"]), deletable = settings.get_req_multiple_req_items(), list_fields = list_fields, filter_widgets = filter_widgets, ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(req_skill_represent = self.req_skill_represent, ) # ----------------------------------------------------------------- @staticmethod def req_skill_represent(id): """ Used in controllers/req.py commit() """ if not id: return current.messages["NONE"] db = current.db rstable = db.req_req_skill hstable = db.hrm_skill query = (rstable.id == id) & \ (rstable.skill_id == hstable.id) record = db(query).select(hstable.name, limitby = (0, 1)).first() try: return record.name except: return current.messages.UNKNOWN_OPT # ============================================================================= class S3RequestRecurringModel(S3Model): """ """ names = ["req_job", ] def model(self): T = current.T s3 = current.response.s3 # ----------------------------------------------------------------- # Request Job # # Jobs for Scheduling Recurring Requests # tablename = "req_job" self.define_table(tablename, self.req_req_id(empty=False), s3.scheduler_task_id(), *s3_meta_fields()) # CRUD Strings ADD_JOB = T("Create Job") s3.crud_strings[tablename] = Storage( label_create = ADD_JOB, title_display = T("Request Job"), title_list = T("Request Schedule"), title_update = T("Edit Job"), label_list_button = T("List Jobs"), msg_record_created = T("Job added"), msg_record_modified = T("Job updated"), msg_record_deleted = T("Job deleted"), msg_list_empty = T("No jobs configured yet"), msg_no_match = T("No jobs configured")) # Resource Configuration self.set_method("req", "req", component_name="job", method="reset", action=req_job_reset) # Resource Configuration self.set_method("req", "req", component_name="job", method="run", action=req_job_run) # 
--------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # ------------------------------------------------------------------------- @staticmethod def req_recurring_duplicate(job): """ De-duplicate Recurring Request Jobs """ if job.tablename == "req_recurring": table = job.table name = job.data.get("name", None) query = (table.name == name) _duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() if _duplicate: job.id = _duplicate.id job.data.id = _duplicate.id job.method = job.METHOD.UPDATE # ============================================================================= class S3RequestSummaryModel(S3Model): """ Simple Requests Management System - Organisations can request Money or Time from remote volunteers - Sites can request Time from local volunteers or accept drop-off for Goods """ names = ["req_organisation_needs", "req_site_needs", ] def model(self): T = current.T configure = self.configure crud_strings = current.response.s3.crud_strings define_table = self.define_table # ----------------------------------------------------------------- # Summary of Needs for an Organisation # tablename = "req_organisation_needs" define_table(tablename, self.org_organisation_id( requires = self.org_organisation_requires(required=True), ), Field("money", "boolean", label = T("Soliciting Cash Donations?"), represent = s3_yes_no_represent, default = False, ), Field("money_details", "text", label = T("Details"), widget = s3_richtext_widget, ), Field("vol", "boolean", label = T("Opportunities to Volunteer Remotely?"), represent = s3_yes_no_represent, default = False, ), Field("vol_details", "text", label = T("Details"), widget = s3_richtext_widget, ), *s3_meta_fields()) # CRUD strings ADD_NEEDS = T("Add Organization Needs") crud_strings[tablename] = Storage( title_display=T("Organization Needs"), title_update=T("Edit Organization Needs"), label_delete_button=T("Delete Organization Needs"), 
msg_record_created=T("Organization Needs added"), msg_record_modified=T("Organization Needs updated"), msg_record_deleted=T("Organization Needs deleted")) configure(tablename, context = {"organisation": "organisation_id", }, ) # ----------------------------------------------------------------- # Summary of Needs for a site # tablename = "req_site_needs" define_table(tablename, self.super_link("site_id", "org_site"), Field("vol", "boolean", label = T("Opportunities to Volunteer On-Site?"), represent = s3_yes_no_represent, default = False, ), Field("vol_details", "text", label = T("Details"), widget = s3_richtext_widget, ), Field("goods", "boolean", label = T("Drop-off Location for Goods?"), represent = s3_yes_no_represent, default = False, ), Field("goods_details", "text", label = T("Details"), widget = s3_richtext_widget, ), #s3_comments("needs", # label=T("Needs"), # comment=None, # widget=S3PriorityListWidget(), # ), *s3_meta_fields()) # CRUD strings ADD_NEEDS = T("Add Site Needs") crud_strings[tablename] = Storage( title_display=T("Site Needs"), title_update=T("Edit Site Needs"), label_delete_button=T("Delete Site Needs"), msg_record_created=T("Site Needs added"), msg_record_modified=T("Site Needs updated"), msg_record_deleted=T("Site Needs deleted")) configure(tablename, context = {"location": "site_id$organisation_id", "organisation": "organisation_id", "site": "site_id", }, ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # ============================================================================= class S3RequestTaskModel(S3Model): """ Link Requests for Skills to Tasks """ names = ["req_task", ] def model(self): #T = current.T # ----------------------------------------------------------------- # Link Skill Requests to Tasks # tablename = "req_task_req" self.define_table(tablename, self.project_task_id(), self.req_req_id(empty=False), #self.req_req_person_id(), 
#self.req_req_skill_id(), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # ============================================================================= class S3CommitModel(S3Model): """ """ names = ["req_commit", "req_commit_id", ] def model(self): T = current.T db = current.db auth = current.auth s3 = current.response.s3 add_components = self.add_components settings = current.deployment_settings req_types = settings.get_req_req_type() commit_value = settings.get_req_commit_value() unsolicited_commit = settings.get_req_commit_without_request() committer_is_author = settings.get_req_committer_is_author() if committer_is_author: site_default = auth.user.site_id if auth.is_logged_in() else None committer_default = auth.s3_logged_in_person() else: site_default = None committer_default = None # Dropdown or Autocomplete? if settings.get_org_site_autocomplete(): site_widget = S3SiteAutocompleteWidget() site_comment = DIV(_class="tooltip", _title="%s|%s" % (T("From Facility"), current.messages.AUTOCOMPLETE_HELP)) else: site_widget = None site_comment = None # --------------------------------------------------------------------- # Commitments (Pledges) tablename = "req_commit" self.define_table(tablename, self.super_link("site_id", "org_site", comment = site_comment, default = site_default, label = T("From Facility"), # Non-Item Requests make False in the prep readable = True, writable = True, represent = self.org_site_represent, widget = site_widget, ), self.gis_location_id( # Used for reporting on where Donations originated readable = False, writable = False ), # Non-Item Requests make True in the prep self.org_organisation_id( readable = False, writable = False ), # @ToDo: deployment_setting for whether this can be empty self.req_req_id( empty = not unsolicited_commit, ), Field("type", "integer", # These are copied automatically from the Req readable = False, writable = 
False, ), s3_datetime(default = "now", represent = "date", ), s3_datetime("date_available", label = T("Date Available"), represent = "date", ), self.pr_person_id("committer_id", default = committer_default, label = T("Committed By"), comment = self.pr_person_comment(child="committer_id"), ), # @ToDo: Calculate this from line items in Item Commits Field("value", "double", label = T("Estimated Value"), readable = commit_value, writable = commit_value, ), # @ToDo: Move this into a Currency Widget for the value field s3_currency(readable = commit_value, writable = commit_value, ), Field("cancel", "boolean", default = False, label = T("Cancel"), readable = False, writable = False, ), s3_comments(), *s3_meta_fields()) # Which levels of Hierarchy are we using? hierarchy = current.gis.get_location_hierarchy() levels = hierarchy.keys() if len(settings.get_gis_countries()) == 1 or \ s3.gis.config.region_location_id: levels.remove("L0") filter_widgets = [ S3TextFilter(["committer_id$first_name", "committer_id$middle_name", "committer_id$last_name", "site_id$name", "comments", "req_id$name", "organisation_id$name" ], label = T("Search"), comment=T("Search for a commitment by Committer name, Request ID, Site or Organization."), ), S3LocationFilter("location_id", levels=levels, widget="multiselect", hidden=True, ), S3DateFilter("date", label=T("Date"), hide_time=True, comment=T("Search for commitments made between these dates."), hidden=True, ), S3DateFilter("date_available", label=T("Date Available"), hide_time=True, comment=T("Search for commitments available between these dates."), hidden=True, ), ] if len(req_types) > 1: filter_widgets.insert(1, S3OptionsFilter("type", label=T("Type"), cols = len(req_types), hidden=True, )) # CRUD strings ADD_COMMIT = T("Make Commitment") s3.crud_strings[tablename] = Storage( label_create = ADD_COMMIT, title_display = T("Commitment Details"), title_list = T("Commitments"), title_update = T("Edit Commitment"), label_list_button = T("List 
Commitments"), label_delete_button = T("Delete Commitment"), msg_record_created = T("Commitment Added"), msg_record_modified = T("Commitment Updated"), msg_record_deleted = T("Commitment Canceled"), msg_list_empty = T("No Commitments")) # Reusable Field commit_id = S3ReusableField("commit_id", "reference %s" % tablename, sortby="date", requires = IS_NULL_OR( IS_ONE_OF(db, "req_commit.id", self.commit_represent, orderby="req_commit.date", sort=True)), represent = self.commit_represent, label = T("Commitment"), ondelete = "CASCADE") self.configure(tablename, context = {"event": "req_id$event_id", "location": "location_id", "organisation": "organisation_id", "request": "req_id", #"site": "site_id", "site": "req_id$site_id", }, filter_widgets = filter_widgets, list_fields = ["site_id", "req_id", "committer_id", # @ToDo: Vary by deployment_setting (easy) # @ToDo: Allow a single column to support different components based on type # @ToDo: Include Qty too (Computed VF in component?) (T("Committed Items"), "commit_item.req_item_id$item_id"), #(T("Committed People"), "commit_person.person_id"), #(T("Committed Skills"), "commit_skill.skill_id"), "date", "date_available", "comments", ], # Commitments should only be made to a specific request listadd = unsolicited_commit, onaccept = self.commit_onaccept, ondelete = self.commit_ondelete, onvalidation = self.commit_onvalidation, ) # Components add_components(tablename, # Committed Items req_commit_item="commit_id", # Committed Persons req_commit_person="commit_id", # Committed Skills req_commit_skill="commit_id", ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(req_commit_id = commit_id, ) # ------------------------------------------------------------------------- @staticmethod def commit_represent(id, row=None): """ Represent a Commit """ if row: table = current.db.req_commit elif not id: return current.messages["NONE"] else: db = current.db table 
= db.req_commit row = db(table.id == id).select(table.type, table.date, table.organisation_id, table.site_id, limitby=(0, 1)).first() if row.type == 1: # Items return "%s - %s" % (table.site_id.represent(row.site_id), table.date.represent(row.date)) else: return "%s - %s" % (table.organisation_id.represent(row.organisation_id), table.date.represent(row.date)) # ------------------------------------------------------------------------- @staticmethod def commit_onvalidation(form): """ Copy the request_type to the commitment """ req_id = s3_get_last_record_id("req_req") if req_id: rtable = current.s3db.req_req query = (rtable.id == req_id) req_record = current.db(query).select(rtable.type, limitby=(0, 1)).first() if req_record: form.vars.type = req_record.type # ------------------------------------------------------------------------- @staticmethod def commit_onaccept(form): """ Update Status of Request & components """ db = current.db s3db = current.s3db form_vars = form.vars # @ToDo: Will this always be in vars? 
        # Record ID of the commitment just created/updated.
        # (NB local names "id" and "type" below shadow builtins; kept as-is.)
        id = form_vars.id
        if not id:
            return
        ctable = s3db.req_commit
        site_id = form_vars.get("site_id", None)
        if site_id:
            # Set location_id to location of site
            # (used for reporting on where Donations originated)
            stable = s3db.org_site
            site = db(stable.site_id == site_id).select(stable.location_id,
                                                        limitby=(0, 1)).first()
            if site and site.location_id:
                db(ctable.id == id).update(location_id = site.location_id)
        # Find the request this commitment is against
        rtable = s3db.req_req
        query = (ctable.id == id) & \
                (rtable.id == ctable.req_id)
        req = db(query).select(rtable.id,
                               rtable.type,
                               rtable.req_status,
                               rtable.commit_status,
                               limitby=(0, 1)).first()
        if not req:
            # Unsolicited commitment (no linked request): nothing to update
            return
        req_id = req.id
        type = req.type
        if type == 1:
            # Items
            # Update Commit Status for Items in the Request
            # Get the full list of items in the request
            ritable = s3db.req_req_item
            query = (ritable.req_id == req_id) & \
                    (ritable.deleted == False)
            ritems = db(query).select(ritable.id,
                                      ritable.item_pack_id,
                                      ritable.quantity,
                                      # Virtual Field
                                      #ritable.pack_quantity,
                                      )
            # Get all Commits in-system (across ALL commitments for this req)
            citable = s3db.req_commit_item
            query = (ctable.req_id == req_id) & \
                    (citable.commit_id == ctable.id) & \
                    (citable.deleted == False)
            citems = db(query).select(citable.item_pack_id,
                                      citable.quantity,
                                      # Virtual Field
                                      #citable.pack_quantity,
                                      )
            # Sum committed base quantities per pack (quantity * pack size)
            commit_qty = {}
            for item in citems:
                item_pack_id = item.item_pack_id
                if item_pack_id in commit_qty:
                    commit_qty[item_pack_id] += (item.quantity * item.pack_quantity())
                else:
                    commit_qty[item_pack_id] = (item.quantity * item.pack_quantity())
            # NOTE(review): "complete" keeps only the verdict of the LAST
            # matched request item — an earlier unmet item is overwritten if
            # the final one is fully committed; confirm whether all-items
            # semantics were intended.
            complete = False
            for item in ritems:
                if item.item_pack_id in commit_qty:
                    quantity_commit = commit_qty[item.item_pack_id]
                    # Persist the committed quantity onto the request item
                    db(ritable.id == item.id).update(quantity_commit=quantity_commit)
                    req_quantity = item.quantity * item.pack_quantity()
                    if quantity_commit >= req_quantity:
                        complete = True
                    else:
                        complete = False
            # Update overall Request Status
            if complete:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE)
            else:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL)
        elif type == 3:
            # People
            ## If this is a single person commitment, then create the commit_person record automatically
            #table = s3db.req_commit_person
            #table.insert(commit_id = id,
            #             #skill_id = ???,
            #             person_id = auth.s3_logged_in_person())
            ## @ToDo: Mark Person's allocation status as 'Committed'
            # Update Commit Status for Skills in the Request
            # Get the full list of skills in the request
            # @ToDo: Breakdown to component Skills within multi
            rstable = s3db.req_req_skill
            query = (rstable.req_id == req_id) & \
                    (rstable.deleted == False)
            rskills = db(query).select(rstable.id,
                                       rstable.skill_id,
                                       rstable.quantity,
                                       )
            # Get all Commits in-system
            cstable = s3db.req_commit_skill
            query = (ctable.req_id == req_id) & \
                    (cstable.commit_id == ctable.id) & \
                    (cstable.deleted == False)
            cskills = db(query).select(cstable.skill_id,
                                       cstable.quantity,
                                       )
            # skill_id is a multi-skill list field: fan quantities out per
            # individual skill, keeping a running total per skill_id
            commit_qty = {}
            for skill in cskills:
                multi_skill_id = skill.skill_id
                for skill_id in multi_skill_id:
                    if skill_id in commit_qty:
                        commit_qty[skill_id] += skill.quantity
                    else:
                        commit_qty[skill_id] = skill.quantity
            # NOTE(review): same last-row-wins behaviour for "complete" as in
            # the Items branch above.
            complete = False
            for skill in rskills:
                multi_skill_id = skill.skill_id
                # Use the MAX committed quantity across the requested skills
                quantity_commit = 0
                for skill_id in multi_skill_id:
                    if skill_id in commit_qty:
                        if commit_qty[skill_id] > quantity_commit:
                            quantity_commit = commit_qty[skill_id]
                db(rstable.id == skill.id).update(quantity_commit=quantity_commit)
                req_quantity = skill.quantity
                if quantity_commit >= req_quantity:
                    complete = True
                else:
                    complete = False
            # Update overall Request Status
            if complete:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE)
            else:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL)
        elif type == 9:
            # Other
            # Assume Partial not Complete
            # @ToDo: Provide a way for the committer to specify this
            data = {}
            if req.commit_status == REQ_STATUS_NONE:
                data["commit_status"] = REQ_STATUS_PARTIAL
            if req.req_status == REQ_STATUS_NONE:
                # Show as 'Responded'
                data["req_status"] = REQ_STATUS_PARTIAL
            if data:
                db(rtable.id == req_id).update(**data)

    # -------------------------------------------------------------------------
    @staticmethod
    def commit_ondelete(row):
        """
            Update Status of Request & components

            Recomputes the request's commit_status after a commitment is
            (soft-)deleted, excluding the deleted commitment's own line items.

            @param row: the deleted req_commit Row (only row.id is read)
        """

        db = current.db
        s3db = current.s3db

        id = row.id
        # Find the request:
        # the row is already soft-deleted, so its FKs were serialised into
        # the deleted_fk JSON field — recover req_id from there
        ctable = s3db.req_commit
        fks = db(ctable.id == id).select(ctable.deleted_fk,
                                         limitby=(0, 1)
                                         ).first().deleted_fk
        req_id = json.loads(fks)["req_id"]
        rtable = s3db.req_req
        req = db(rtable.id == req_id).select(rtable.id,
                                             rtable.type,
                                             rtable.commit_status,
                                             limitby=(0, 1)).first()
        if not req:
            return
        req_id = req.id
        type = req.type
        if type == 1:
            # Items
            # Update Commit Status for Items in the Request
            # Get the full list of items in the request
            ritable = s3db.req_req_item
            query = (ritable.req_id == req_id) & \
                    (ritable.deleted == False)
            ritems = db(query).select(ritable.id,
                                      ritable.item_pack_id,
                                      ritable.quantity,
                                      # Virtual Field
                                      #ritable.pack_quantity,
                                      )
            # Get all Commits in-system
            # - less those from this commit
            citable = s3db.req_commit_item
            query = (ctable.req_id == req_id) & \
                    (citable.commit_id == ctable.id) & \
                    (citable.commit_id != id) & \
                    (citable.deleted == False)
            citems = db(query).select(citable.item_pack_id,
                                      citable.quantity,
                                      # Virtual Field
                                      #citable.pack_quantity,
                                      )
            commit_qty = {}
            for item in citems:
                item_pack_id = item.item_pack_id
                if item_pack_id in commit_qty:
                    commit_qty[item_pack_id] += (item.quantity * item.pack_quantity())
                else:
                    commit_qty[item_pack_id] = (item.quantity * item.pack_quantity())
            # NOTE(review): last-row-wins "complete" flag, as in commit_onaccept
            complete = False
            for item in ritems:
                if item.item_pack_id in commit_qty:
                    quantity_commit = commit_qty[item.item_pack_id]
                    db(ritable.id == item.id).update(quantity_commit=quantity_commit)
                    req_quantity = item.quantity * item.pack_quantity()
                    if quantity_commit >= req_quantity:
                        complete = True
                    else:
                        complete = False
            # Update overall Request Status
            # (NONE when no commitments at all remain after this deletion)
            if complete:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE)
            elif not citems:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_NONE)
            else:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL)
        elif type == 3:
            # People
            ## If this is a single person commitment, then create the commit_person record automatically
            #table = s3db.req_commit_person
            #table.insert(commit_id = vars.id,
            #             #skill_id = ???,
            #             person_id = auth.s3_logged_in_person())
            ## @ToDo: Mark Person's allocation status as 'Committed'
            # Update Commit Status for Skills in the Request
            # Get the full list of skills in the request
            rstable = s3db.req_req_skill
            query = (rstable.req_id == req_id) & \
                    (rstable.deleted == False)
            rskills = db(query).select(rstable.id,
                                       rstable.skill_id,
                                       rstable.quantity,
                                       )
            # Get all Commits in-system
            # - less those from this commit
            cstable = s3db.req_commit_skill
            query = (ctable.req_id == req_id) & \
                    (cstable.commit_id == ctable.id) & \
                    (cstable.commit_id != id) & \
                    (cstable.deleted == False)
            cskills = db(query).select(cstable.skill_id,
                                       cstable.quantity,
                                       )
            # Per-skill running totals (skill_id is a multi-skill list field)
            commit_qty = {}
            for skill in cskills:
                multi_skill_id = skill.skill_id
                for skill_id in multi_skill_id:
                    if skill_id in commit_qty:
                        commit_qty[skill_id] += skill.quantity
                    else:
                        commit_qty[skill_id] = skill.quantity
            complete = False
            for skill in rskills:
                multi_skill_id = skill.skill_id
                quantity_commit = 0
                for skill_id in multi_skill_id:
                    if skill_id in commit_qty:
                        if commit_qty[skill_id] > quantity_commit:
                            quantity_commit = commit_qty[skill_id]
                db(rstable.id == skill.id).update(quantity_commit=quantity_commit)
                req_quantity = skill.quantity
                if quantity_commit >= req_quantity:
                    complete = True
                else:
                    complete = False
            # Update overall Request Status
            if complete:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE)
            elif not cskills:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_NONE)
            else:
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL)
        elif type == 9:
            # Other
            if req.commit_status != REQ_STATUS_NONE:
                # Assume Complete not partial
                # @ToDo: Provide a way for the committer to specify this
                # NOTE(review): comment above says "Complete" but the status is
                # reset to NONE — confirm the intended semantics.
                db(rtable.id == req_id).update(commit_status=REQ_STATUS_NONE)
# ============================================================================= class S3CommitItemModel(S3Model): """ """ names = ["req_commit_item", "req_send_commit" ] def model(self): T = current.T # ----------------------------------------------------------------- # Commitment Items # @ToDo: Update the req_item_id in the commit_item if the req_id of the commit is changed tablename = "req_commit_item" self.define_table(tablename, self.req_commit_id(), #item_id, #supply_item_id(), self.req_item_id(), self.supply_item_pack_id(), Field("quantity", "double", notnull=True, label = T("Quantity")), Field.Method("pack_quantity", self.supply_item_pack_quantity(tablename=tablename)), s3_comments(), *s3_meta_fields()) # CRUD strings ADD_COMMIT_ITEM = T("Add Item to Commitment") current.response.s3.crud_strings[tablename] = Storage( label_create = ADD_COMMIT_ITEM, title_display = T("Commitment Item Details"), title_list = T("Commitment Items"), title_update = T("Edit Commitment Item"), label_list_button = T("List Commitment Items"), label_delete_button = T("Delete Commitment Item"), msg_record_created = T("Commitment Item added"), msg_record_modified = T("Commitment Item updated"), msg_record_deleted = T("Commitment Item deleted"), msg_list_empty = T("No Commitment Items currently registered")) self.configure(tablename, onaccept = self.commit_item_onaccept, extra_fields = ["item_pack_id"]) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict(# Used by commit_req() controller req_commit_item_onaccept = self.commit_item_onaccept, req_send_commit = self.req_send_commit, ) # ------------------------------------------------------------------------- @staticmethod def commit_item_onaccept(form): """ Update the Commit Status for the Request Item & Request """ db = current.db vars = form.vars req_item_id = vars.req_item_id # Get the req_id ritable = db.req_req_item req = db(ritable.id == 
req_item_id).select(ritable.req_id, limitby=(0, 1)).first() if not req: return req_id = req.req_id # Get the full list of items in the request query = (ritable.req_id == req_id) & \ (ritable.deleted == False) ritems = db(query).select(ritable.id, ritable.item_pack_id, ritable.quantity, # Virtual Field #ritable.pack_quantity, ) # Get all Commits in-system ctable = db.req_commit citable = db.req_commit_item query = (ctable.req_id == req_id) & \ (citable.commit_id == ctable.id) & \ (citable.deleted == False) citems = db(query).select(citable.item_pack_id, citable.quantity, # Virtual Field #citable.pack_quantity, ) commit_qty = {} for item in citems: item_pack_id = item.item_pack_id if item_pack_id in commit_qty: commit_qty[item_pack_id] += (item.quantity * item.pack_quantity()) else: commit_qty[item_pack_id] = (item.quantity * item.pack_quantity()) complete = False for item in ritems: if item.item_pack_id in commit_qty: quantity_commit = commit_qty[item.item_pack_id] db(ritable.id == item.id).update(quantity_commit=quantity_commit) req_quantity = item.quantity * item.pack_quantity() if quantity_commit >= req_quantity: complete = True else: complete = False # Update overall Request Status rtable = db.req_req if complete: db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE) else: db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL) # ------------------------------------------------------------------------- @staticmethod def req_send_commit(): """ Create a Shipment containing all items in a Commitment """ # Get the commit record try: commit_id = current.request.args[0] except: redirect(URL(c="req", f="commit")) db = current.db s3db = current.s3db req_table = db.req_req rim_table = db.req_req_item com_table = db.req_commit cim_table = db.req_commit_item send_table = s3db.inv_send tracktable = s3db.inv_track_item query = (com_table.id == commit_id) & \ (com_table.req_id == req_table.id) & \ (com_table.deleted == False) record = 
db(query).select(com_table.committer_id, com_table.site_id, com_table.organisation_id, req_table.requester_id, req_table.site_id, req_table.req_ref, limitby=(0, 1)).first() # @ToDo: Identify if we have stock items which match the commit items # If we have a single match per item then proceed automatically (as-now) & then decrement the stock quantity # If we have no match then warn the user & ask if they should proceed anyway # If we have mulitple matches then provide a UI to allow the user to select which stock items to use # Create an inv_send and link to the commit vars = Storage(sender_id = record.req_commit.committer_id, site_id = record.req_commit.site_id, recipient_id = record.req_req.requester_id, to_site_id = record.req_req.site_id, req_ref = record.req_req.req_ref, status = 0) send_id = send_table.insert(**vars) vars.id = send_id # Get all of the committed items query = (cim_table.commit_id == commit_id) & \ (cim_table.req_item_id == rim_table.id) & \ (cim_table.deleted == False) records = db(query).select(rim_table.id, rim_table.item_id, rim_table.item_pack_id, rim_table.currency, rim_table.quantity, rim_table.quantity_transit, rim_table.quantity_fulfil, cim_table.quantity, ) # Create inv_track_items for each commit item insert = tracktable.insert for row in records: rim = row.req_req_item # Now done as a VirtualField instead (looks better & updates closer to real-time, so less of a race condition) #quantity_shipped = max(rim.quantity_transit, rim.quantity_fulfil) #quantity_needed = rim.quantity - quantity_shipped id = insert(req_item_id = rim.id, track_org_id = record.req_commit.organisation_id, send_id = send_id, status = 1, item_id = rim.item_id, item_pack_id = rim.item_pack_id, currency = rim.currency, #req_quantity = quantity_needed, quantity = row.req_commit_item.quantity, recv_quantity = row.req_commit_item.quantity, ) # Create the Waybill form = Storage() form.vars = vars s3db.inv_send_onaccept(form) # Redirect to inv_send for the send id just 
created redirect(URL(#c = "inv", or "req" f = "send", #args = [send_id, "track_item"] args = [send_id] )) # ============================================================================= class S3CommitPersonModel(S3Model): """ Commit a named individual to a Request """ names = ["req_commit_person"] def model(self): T = current.T # ----------------------------------------------------------------- # Committed Persons # tablename = "req_commit_person" self.define_table(tablename, self.req_commit_id(), # For reference self.hrm_multi_skill_id( writable=False, comment=None, ), # This should be person not hrm as we want to mark them as allocated self.pr_person_id(), s3_comments(), *s3_meta_fields()) # CRUD strings ADD_COMMIT_PERSON = T("Add Person to Commitment") current.response.s3.crud_strings[tablename] = Storage( label_create = ADD_COMMIT_PERSON, title_display = T("Committed Person Details"), title_list = T("Committed People"), title_update = T("Edit Committed Person"), label_list_button = T("List Committed People"), label_delete_button = T("Remove Person from Commitment"), msg_record_created = T("Person added to Commitment"), msg_record_modified = T("Committed Person updated"), msg_record_deleted = T("Person removed from Commitment"), msg_list_empty = T("No People currently committed")) # @ToDo: Fix this before enabling #self.configure(tablename, # onaccept = self.commit_person_onaccept) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # ------------------------------------------------------------------------- @staticmethod def commit_person_onaccept(form): """ Not working """ db = current.db s3db = current.s3db table = db.req_commit_person rstable = s3db.req_req_skill # Try to get req_skill_id from the form req_skill_id = 0 if form: req_skill_id = form.vars.get("req_skill_id", None) if not req_skill_id: commit_skill_id = s3_get_last_record_id("req_commit_skill") r_commit_skill = 
table[commit_skill_id] req_skill_id = r_commit_skill.req_skill_id query = (table.req_skill_id == req_skill_id) & \ (table.deleted == False) commit_skills = db(query).select(table.quantity) quantity_commit = 0 for commit_skill in commit_skills: quantity_commit += commit_skill.quantity r_req_skill = db.req_req_skill[req_skill_id] rstable[req_skill_id] = dict(quantity_commit = quantity_commit) # Update status_commit of the req record s3_store_last_record_id("req_req_skill", r_req_skill.id) req_skill_onaccept(None) # ============================================================================= class S3CommitSkillModel(S3Model): """ Commit anonymous people to a Request """ names = ["req_commit_skill"] def model(self): T = current.T # ----------------------------------------------------------------- # Committed Skills # tablename = "req_commit_skill" self.define_table(tablename, self.req_commit_id(), self.hrm_multi_skill_id(), Field("quantity", "double", notnull=True, label = T("Quantity")), s3_comments(), *s3_meta_fields()) # CRUD strings current.response.s3.crud_strings[tablename] = Storage( label_create = T("Add People to Commitment"), title_display = T("Committed People Details"), title_list = T("Committed People"), title_update = T("Edit Committed People"), label_list_button = T("List Committed People"), label_delete_button = T("Remove People from Commitment"), msg_record_created = T("People added to Commitment"), msg_record_modified = T("Committed People updated"), msg_record_deleted = T("People removed from Commitment"), msg_list_empty = T("No People currently committed")) self.configure(tablename, onaccept = self.commit_skill_onaccept) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # ------------------------------------------------------------------------- @staticmethod def commit_skill_onaccept(form): """ Update the Commit Status for the Request Skill & Request """ db = current.db 
        vars = form.vars
        req_skill_id = vars.req_skill_id
        # Get the req_id from the request-skill row this commitment targets
        rstable = db.req_req_skill
        req = db(rstable.id == req_skill_id).select(rstable.req_id,
                                                    limitby=(0, 1)).first()
        if not req:
            return
        req_id = req.req_id

        # Get the full list of skills in the request
        query = (rstable.req_id == req_id) & \
                (rstable.deleted == False)
        rskills = db(query).select(rstable.id,
                                   rstable.skill_id,
                                   rstable.quantity,
                                   )
        # Get all Commits in-system
        ctable = db.req_commit
        cstable = db.req_commit_skill
        query = (ctable.req_id == req_id) & \
                (cstable.commit_id == ctable.id) & \
                (cstable.deleted == False)
        cskills = db(query).select(cstable.skill_id,
                                   cstable.quantity,
                                   )
        # Per-skill running totals (skill_id is a multi-skill list field)
        commit_qty = {}
        for skill in cskills:
            multi_skill_id = skill.skill_id
            for skill_id in multi_skill_id:
                if skill_id in commit_qty:
                    commit_qty[skill_id] += skill.quantity
                else:
                    commit_qty[skill_id] = skill.quantity
        # NOTE(review): "complete" keeps only the last request-skill's verdict
        # (same pattern as S3CommitModel.commit_onaccept) — confirm intent.
        complete = False
        for skill in rskills:
            multi_skill_id = skill.skill_id
            # Use the MAX committed quantity across the requested skills
            quantity_commit = 0
            for skill_id in multi_skill_id:
                if skill_id in commit_qty:
                    if commit_qty[skill_id] > quantity_commit:
                        quantity_commit = commit_qty[skill_id]
            db(rstable.id == skill.id).update(quantity_commit=quantity_commit)
            req_quantity = skill.quantity
            if quantity_commit >= req_quantity:
                complete = True
            else:
                complete = False
        # Update overall Request Status
        rtable = db.req_req
        if complete:
            db(rtable.id == req_id).update(commit_status=REQ_STATUS_COMPLETE)
        else:
            db(rtable.id == req_id).update(commit_status=REQ_STATUS_PARTIAL)

# =============================================================================
def req_item_onaccept(form):
    """
        Update Request Status
        Update req_item_category link table

        @param form: the FORM; req_id/item_id are read from form.vars,
                     falling back to the last-stored req_req record ID
        @raise HTTP: 500 when no req_id can be determined
    """

    req_id = form.vars.get("req_id", None)
    if not req_id:
        # e.g. created as an inline component: recover the parent request ID
        req_id = s3_get_last_record_id("req_req")
    if not req_id:
        raise HTTP(500, "can not get req_id")

    # Update Request Status
    req_update_status(req_id)

    # Update req_item_category link table
    item_id = form.vars.get("item_id", None)
    db = current.db
    citable = db.supply_catalog_item
    cats = db(citable.item_id
== item_id).select(citable.item_category_id) rictable = db.req_req_item_category for cat in cats: item_category_id = cat.item_category_id query = (rictable.deleted == False) & \ (rictable.req_id == req_id) & \ (rictable.item_category_id == item_category_id) exists = db(query).select(rictable.id, limitby=(0, 1)) if not exists: rictable.insert(req_id = req_id, item_category_id = item_category_id) # ============================================================================= def req_item_ondelete(row): """ """ db = current.db sitable = db.supply_item ritable = db.req_req_item item = db(ritable.id == row.id).select(ritable.deleted_fk, limitby=(0, 1)).first() fks = json.loads(item.deleted_fk) req_id = fks["req_id"] item_id = fks["item_id"] citable = db.supply_catalog_item cats = db(citable.item_id == item_id).select(citable.item_category_id) for cat in cats: item_category_id = cat.item_category_id # Check if we have other req_items in the same category query = (ritable.deleted == False) & \ (ritable.req_id == req_id) & \ (ritable.item_id == sitable.id) & \ (sitable.item_category_id == item_category_id) others = db(query).select(ritable.id, limitby=(0, 1)) if not others: # Delete req_item_category link table rictable = db.req_req_item_category query = (rictable.req_id == req_id) & \ (rictable.item_category_id == item_category_id) db(query).delete() # ============================================================================= def req_update_status(req_id): """ Update Request Status commit_status, transit_status, fulfil_status None => quantity = 0 for ALL items Partial => some items have quantity > 0 Complete => quantity_x = quantity(requested) for ALL items """ db = current.db s3db = current.s3db table = s3db.req_req_item is_none = dict(commit = True, transit = True, fulfil = True) is_complete = dict(commit = True, transit = True, fulfil = True) # Must check all items in the req query = (table.req_id == req_id) & \ (table.deleted == False ) req_items = 
db(query).select(table.quantity, table.quantity_commit, table.quantity_transit, table.quantity_fulfil) for req_item in req_items: quantity = req_item.quantity for status_type in ["commit", "transit", "fulfil"]: if req_item["quantity_%s" % status_type] < quantity: is_complete[status_type] = False if req_item["quantity_%s" % status_type]: is_none[status_type] = False status_update = {} for status_type in ["commit", "transit", "fulfil"]: if is_complete[status_type]: status_update["%s_status" % status_type] = REQ_STATUS_COMPLETE elif is_none[status_type]: status_update["%s_status" % status_type] = REQ_STATUS_NONE else: status_update["%s_status" % status_type] = REQ_STATUS_PARTIAL rtable = s3db.req_req db(rtable.id == req_id).update(**status_update) # ============================================================================= def req_skill_onaccept(form): """ Update req_req. commit_status, transit_status, fulfil_status None => quantity = 0 for ALL skills Partial => some skills have quantity > 0 Complete => quantity_x = quantity(requested) for ALL skills Create a Task for People to be assigned to """ if form and form.vars.req_id: req_id = form.vars.req_id else: req_id = s3_get_last_record_id("req_req") if not req_id: raise HTTP(500, "can not get req_id") db = current.db s3db = current.s3db rtable = s3db.req_req query = (rtable.id == req_id) record = db(query).select(rtable.purpose, limitby=(0, 1)).first() table = s3db.req_req_skill query = (table.req_id == req_id) #if record: # # Copy the Task description to the Skills component # db(query).update(task=record.purpose) is_none = dict(commit = True, transit = True, fulfil = True) is_complete = dict(commit = True, transit = True, fulfil = True) # Must check all skills in the req req_skills = db(query).select(table.quantity, table.quantity_commit, table.quantity_transit, table.quantity_fulfil) for req_skill in req_skills: quantity = req_skill.quantity for status_type in ["commit", "transit", "fulfil"]: if 
req_skill["quantity_%s" % status_type] < quantity: is_complete[status_type] = False if req_skill["quantity_%s" % status_type]: is_none[status_type] = False status_update = {} for status_type in ["commit", "transit", "fulfil"]: if is_complete[status_type]: status_update["%s_status" % status_type] = REQ_STATUS_COMPLETE elif is_none[status_type]: status_update["%s_status" % status_type] = REQ_STATUS_NONE else: status_update["%s_status" % status_type] = REQ_STATUS_PARTIAL query = (rtable.id == req_id) db(query).update(**status_update) if current.deployment_settings.has_module("project"): # Add a Task to which the People can be assigned # Get the request record otable = s3db.org_site query = (rtable.id == req_id) & \ (otable.id == rtable.site_id) record = db(query).select(rtable.req_ref, rtable.purpose, rtable.priority, rtable.requester_id, rtable.site_id, otable.location_id, limitby=(0, 1)).first() if not record: return name = record.req_req.req_ref or "Req: %s" % req_id table = s3db.project_task task = table.insert(name=name, description=record.req_req.purpose, priority=record.req_req.priority, location_id=record.org_site.location_id, site_id=record.req_req.site_id) # Add the Request as a Component to the Task table = s3db.table("req_task_req", None) if table: table.insert(task_id = task, req_id = req_id) # ============================================================================= def req_req_details(row): """ Show the requested items/skills """ if hasattr(row, "req_req"): row = row.req_req try: id = row.id type = row.type except AttributeError: return None if type == 1: s3db = current.s3db itable = s3db.supply_item ltable = s3db.req_req_item query = (ltable.deleted != True) & \ (ltable.req_id == id) & \ (ltable.item_id == itable.id) items = current.db(query).select(itable.name, ltable.quantity) if items: items = ["%s %s" % (int(item.req_req_item.quantity), item.supply_item.name) for item in items] return ",".join(items) elif type == 3: s3db = current.s3db ltable = 
s3db.req_req_skill query = (ltable.deleted != True) & \ (ltable.req_id == id) skills = current.db(query).select(ltable.skill_id, ltable.quantity) if skills: represent = S3Represent(lookup="hrm_skill", multiple=True, none=current.T("Unskilled") ) skills = ["%s %s" % (skill.quantity, represent(skill.skill_id)) \ for skill in skills] return ",".join(skills) return current.messages["NONE"] # ============================================================================= def req_req_drivers(row): """ Show the driver(s) details """ if hasattr(row, "req_req"): row = row.req_req try: req_ref = row.req_ref type = row.type except AttributeError: return None if type == 1: s3db = current.s3db stable = s3db.inv_send query = (stable.deleted != True) & \ (stable.req_ref == req_ref) drivers = current.db(query).select(stable.driver_name, stable.driver_phone, stable.vehicle_plate_no) if drivers: drivers = ["%s %s %s" % (driver.driver_name or "", driver.driver_phone or "", driver.vehicle_plate_no or "") \ for driver in drivers] return ",".join(drivers) return current.messages["NONE"] # ============================================================================= def req_rheader(r, check_page=False): """ Resource Header for Requests """ if r.representation == "html": if r.name == "req": record = r.record if record: T = current.T s3db = current.s3db request = current.request s3 = current.response.s3 settings = current.deployment_settings use_commit = settings.get_req_use_commit() is_template = record.is_template tabs = [(T("Edit Details"), None)] type = record.type if type == 1 and settings.has_module("inv"): if settings.get_req_multiple_req_items(): req_item_tab_label = T("Items") else: req_item_tab_label = T("Item") tabs.append((req_item_tab_label, "req_item")) elif type == 3 and settings.has_module("hrm"): tabs.append((T("People"), "req_skill")) tabs.append((T("Documents"), "document")) if is_template: tabs.append((T("Schedule"), "job")) elif use_commit: tabs.append((T("Commitments"), 
"commit")) if not check_page: rheader_tabs = s3_rheader_tabs(r, tabs) else: rheader_tabs = DIV() site_id = request.vars.site_id if site_id and not is_template: site_name = s3db.org_site_represent(site_id, show_link=False) commit_btn = A(T("Send from %s") % site_name, _href = URL(c = "req", f = "send_req", args = [r.id], vars = dict(site_id = site_id) ), _class = "action-btn" ) s3.rfooter = TAG[""](commit_btn) elif r.component and \ r.component_name == "commit" and \ r.component_id: prepare_btn = A(T("Prepare Shipment"), _href = URL(f = "send_commit", args = [r.component_id] ), _id = "send_commit", _class = "action-btn" ) s3.rfooter = TAG[""](prepare_btn) site_id = record.site_id if site_id: db = current.db stable = s3db.org_site if settings.get_req_show_quantity_transit() and not is_template: transit_status = s3db.req_status_opts.get(record.transit_status, "") try: if site_id and \ record.transit_status in [REQ_STATUS_PARTIAL, REQ_STATUS_COMPLETE] and \ record.fulfil_status in [None, REQ_STATUS_NONE, REQ_STATUS_PARTIAL]: site_record = db(stable.site_id == site_id).select(stable.uuid, stable.instance_type, limitby=(0, 1)).first() instance_type = site_record.instance_type table = s3db[instance_type] query = (table.uuid == site_record.uuid) id = db(query).select(table.id, limitby=(0, 1)).first().id transit_status = SPAN(transit_status, A(T("Incoming Shipments"), _href = URL(c = instance_type.split("_")[0], f = "incoming", vars = {"viewing" : "%s.%s" % (instance_type, id)} ) ) ) except: pass transit_status = (TH("%s: " % T("Transit Status")), transit_status) else: transit_status = ("") table = r.table if settings.get_req_use_req_number() and not is_template: headerTR = TR(TH("%s: " % table.req_ref.label), TD(table.req_ref.represent(record.req_ref, show_link=True)) ) else: headerTR = TR(TD(settings.get_req_form_name(), _colspan=2, _class="pdf_title"), ) if site_id: org_id = db(stable.site_id == site_id).select(stable.organisation_id, limitby=(0, 1) 
).first().organisation_id logo = s3db.org_organisation_logo(org_id) if logo: headerTR.append(TD(logo, _colspan=2)) if is_template: commit_status = ("") fulfil_status = ("") row1 = "" row3 = "" else: if use_commit: commit_status = (TH("%s: " % table.commit_status.label), table.commit_status.represent(record.commit_status)) else: commit_status = ("") fulfil_status = (TH("%s: " % table.fulfil_status.label), table.fulfil_status.represent(record.fulfil_status)) row1 = TR(TH("%s: " % table.date.label), table.date.represent(record.date), *commit_status ) row3 = TR(TH("%s: " % table.date_required.label), table.date_required.represent(record.date_required), *fulfil_status ) rData = TABLE(headerTR, row1, TR(TH("%s: " % table.site_id.label), table.site_id.represent(site_id), *transit_status ), TR(TH("%s: " % table.requester_id.label), table.requester_id.represent(record.requester_id), ), row3, TR(TH("%s: " % table.purpose.label), record.purpose ), TR(TH("%s: " % table.comments.label), TD(record.comments or "", _colspan=3) ), ) rheader = DIV(rData, rheader_tabs, ) return rheader return None # ============================================================================= def req_match(): """ Function to be called from controller functions to display all requests as a tab for a site. """ T = current.T s3db = current.s3db s3 = current.response.s3 request = current.request settings = current.deployment_settings output = dict() viewing = request.get_vars.get("viewing", None) if not viewing: return output if "." 
in viewing: tablename, id = viewing.split(".", 1) else: return output table = s3db[tablename] site_id = current.db(table.id == id).select(table.site_id, limitby=(0, 1) ).first().site_id actions = [dict(url = URL(c = "req", f = "req", args = ["[id]", "check"], vars = {"site_id": site_id} ), _class = "action-btn", label = str(T("Check")), ) ] if settings.get_req_use_commit(): actions.append( dict(url = URL(c = "req", f = "commit_req", args = ["[id]"], vars = {"site_id": site_id} ), _class = "action-btn", label = str(T("Commit")), ) ) actions.append( dict(url = URL(c = "req", f = "send_req", args = ["[id]"], vars = {"site_id": site_id} ), _class = "action-btn dispatch", label = str(T("Send")), ) ) s3.actions = actions if tablename == "org_office": rheader = s3db.org_rheader elif tablename == "org_facility": rheader = s3db.org_facility_rheader elif tablename == "inv_warehouse": rheader = s3db.inv_rheader elif tablename == "cr_shelter": rheader = s3db.cr_shelter_rheader elif tablename == "hms_hospital": rheader = s3db.hms_hospital_rheader else: rheader = None s3.filter = (s3db.req_req.site_id != site_id) s3db.configure("req_req", insertable=False) # Pre-process def prep(r): # Plugin OrgRoleManager auth = current.auth if auth.user is not None and \ tablename in S3OrgRoleManager.ENTITY_TYPES: sr = auth.get_system_roles() realms = auth.user.realms or Storage() if sr.ADMIN in realms or sr.ORG_ADMIN in realms and \ (realms[sr.ORG_ADMIN] is None or \ r.record.pe_id in realms[sr.ORG_ADMIN]): r.set_handler("roles", S3OrgRoleManager()) return True s3.prep = prep # Post-process def postp(r, output): if r.representation == "html": output["title"] = s3.crud_strings[tablename].title_display return output s3.postp = postp output = current.rest_controller("req", "req", rheader=rheader) return output # ============================================================================= def req_job_reset(r, **attr): """ RESTful method to reset a job status from FAILED to QUEUED, for "Reset" 
action button """ if r.interactive: if r.component and r.component.alias == "job": job_id = r.component_id if job_id: S3Task.reset(job_id) current.session.confirmation = current.T("Job reactivated") r.component_id = None redirect(r.url(method="")) # ============================================================================= def req_job_run(r, **attr): """ RESTful method to run a job now, for "Run Now" action button """ if r.interactive: if r.id: current.s3task.async("req_add_from_template", [r.id], # args {"user_id":current.auth.user.id} # vars ) current.session.confirmation = current.T("Request added") r.component_id = None redirect(r.url(method="")) # ============================================================================= def req_add_from_template(req_id): """ Add a Request from a Template """ fieldnames = ["type", "priority", "site_id", "purpose", "requester_id", "comments", ] db = current.db s3db = current.s3db table = s3db.req_req fields = [table[field] for field in fieldnames] # Load Template template = db(table.id == req_id).select(limitby=(0, 1), *fields).first() data = {"is_template": False} try: for field in fieldnames: data[field] = template[field] except: raise "Template not found: %s" % req_id settings = current.deployment_settings if settings.get_req_use_req_number(): code = s3db.supply_get_shipping_code(settings.get_req_shortname(), template.site_id, table.req_ref, ) data["req_ref"] = code id = table.insert(**data) if template.type == 1: # Copy across req_item table = s3db.req_req_item fieldnames = ["site_id", "item_id", "item_pack_id", "quantity", "pack_value", "currency", "comments", ] fields = [table[field] for field in fieldnames] items = db(table.req_id == req_id).select(*fields) for item in items: data = {"req_id": id} for field in fieldnames: data[field] = item[field] table.insert(**data) elif template.type == 3: # Copy across req_skill table = s3db.req_req_skill fieldnames = ["site_id", "task", "skill_id", "quantity", "comments", ] 
fields = [table[field] for field in fieldnames] skills = db(table.req_id == req_id).select(*fields) for skill in skills: data = {"req_id": id} for field in fieldnames: data[field] = skill[field] table.insert(**data) return id # ============================================================================= def req_customize_req_fields(): """ Customize req_req fields for the Home page & dataList view - this assumes Simple Requests (i.e. type 'Other') """ # Truncate purpose field from s3.s3utils import s3_trunk8 s3_trunk8(lines=2) T = current.T db = current.db s3db = current.s3db s3 = current.response.s3 tablename = "req_req" table = s3db.req_req crud_fields = ["date", #"priority", "site_id", #"is_template", "requester_id", "purpose", ] request = current.request args = request.args if "update.popup" in args or \ "update" in args: field = table.req_status field.writable = True field.requires = IS_IN_SET({REQ_STATUS_NONE: T("Open"), REQ_STATUS_PARTIAL: T("Responded"), REQ_STATUS_COMPLETE: T("Resolved"), REQ_STATUS_CANCEL: T("Cancelled"), }) crud_fields.append("req_status") crud_form = S3SQLCustomForm(*crud_fields) list_fields = crud_fields + ["site_id$location_id", "site_id$location_id$level", "site_id$location_id$parent", "site_id$organisation_id", "site_id$comments", ] table.type.default = 9 # Other field = table.purpose field.label = T("Request") field.requires = IS_NOT_EMPTY(error_message=T("Please enter details of the Request")) field.represent = lambda body: XML(s3_URLise(body)) field = table.date field.label = T("Date") # Make mandatory requires = field.requires field.requires = requires.other field = table.site_id site_id = request.get_vars.get("~.(site)", None) if site_id: field.default = site_id field.readable = field.writable = False # Lookup Site Contact script = \ '''var fieldname='req_req_requester_id' var real_input=$('#'+fieldname) $.when(S3.addPersonWidgetReady(fieldname)).then( function(status){real_input.data('lookup_contact')(fieldname,%s)}, 
function(status){s3_debug(status)}, function(status){s3_debug(status)})''' % site_id s3.jquery_ready.append(script) else: # If the Requester is blank, then lookup default Site Contact script = \ '''$('#req_req_site_id').change(function(){ var site_id=$(this).val() if(site_id){ var fieldname='req_req_requester_id' var real_input=$('#'+fieldname) if(!real_input.val()&&!$('#req_req_requester_id_full_name').val()){ real_input.data('lookup_contact')(fieldname,site_id) }}})''' s3.jquery_ready.append(script) organisation_id = request.get_vars.get("~.(organisation)", None) if organisation_id: # Restrict to Sites belonging to this Org # @ToDo: Handle Branches filterby = "organisation_id" filter_opts = [organisation_id] # No need to use Site Autocomplete in this case field.widget = None else: filterby = None filter_opts = [] field.label = T("Requested for Site") #site_represent = s3db.org_SiteRepresent(show_link=False, # show_type=False) site_represent = S3Represent(lookup="org_site") field.represent = site_represent field.requires = IS_ONE_OF(db, "org_site.site_id", site_represent, filterby = filterby, filter_opts = filter_opts, not_filterby = "obsolete", not_filter_opts = [True], orderby = "org_site.name", sort = True, ) field.comment = S3AddResourceLink(c="org", f="facility", vars = dict(child="site_id", parent="req"), title=T("Add New Site"), ) db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ") field = table.requester_id field.requires = IS_ADD_PERSON_WIDGET2() field.widget = S3AddPersonWidget2(controller="pr") # Which levels of Hierarchy are we using? 
hierarchy = current.gis.get_location_hierarchy() levels = hierarchy.keys() if len(current.deployment_settings.get_gis_countries()) == 1 or \ s3.gis.config.region_location_id: levels.remove("L0") filter_widgets = [ S3TextFilter(["requester_id$first_name", "requester_id$middle_name", "requester_id$last_name", "site_id$name", "purpose", #"comments", ], label = T("Search"), comment=T("Search for a request by Site name, Requester name or free text."), ), #S3OptionsFilter("transit_status", # label = T("Transit Status"), # options = s3db.req_status_opts, # cols = 3, # ), #S3OptionsFilter("fulfil_status", # label = T("Fulfill Status"), # options = s3db.req_status_opts, # cols = 3, # ), S3LocationFilter("site_id$location_id", levels=levels, widget="multiselect", #hidden=True, ), S3OptionsFilter("site_id", label=T("Requested For Site"), widget="multiselect", hidden=True, ), S3DateFilter("date", label=T("Date"), hide_time=True, input_labels = {"ge": "From", "le": "To"}, comment=T("Search for requests made between these dates."), hidden=True, ), #S3DateFilter("date_required", # label=T("Date Needed By"), # hide_time=True, # input_labels = {"ge": "From", "le": "To"}, # comment=T("Search for requests required between these dates."), # hidden=True, # ), ] # @ToDo: deployment_setting if current.auth.s3_has_role("EDITOR"): filter_widgets.insert(-1, S3OptionsFilter("created_by", label=T("Logged By"), widget="multiselect", hidden=True, )) # Return to Requests view after create/update/delete (unless done via Modal) url_next = URL(c="req", f="req", args="datalist") s3db.configure(tablename, create_next = url_next, crud_form = crud_form, delete_next = url_next, filter_formstyle = filter_formstyle, filter_widgets = filter_widgets, # We want the Create form to be in a modal, not inline, for consistency listadd = False, list_fields = list_fields, list_layout = req_req_list_layout, update_next = url_next, ) return table # 
============================================================================= def req_req_list_layout(list_id, item_id, resource, rfields, record): """ Default dataList item renderer for Requests on the Home page & dataList view @param list_id: the HTML ID of the list @param item_id: the HTML ID of the item @param resource: the S3Resource to render @param rfields: the S3ResourceFields to render @param record: the record as dict """ record_id = record["req_req.id"] item_class = "thumbnail" raw = record._row date = record["req_req.date"] body = record["req_req.purpose"] location = record["org_site.location_id"] or "" level = raw["gis_location.level"] if level: location_id = raw["org_site.location_id"] else: location_id = raw["gis_location.parent"] if location_id: location_url = URL(c="gis", f="location", args=[location_id, "profile"]) else: location_url = "#" organisation = record["org_site.organisation_id"] or "" organisation_id = raw["org_site.organisation_id"] org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"]) person = record["req_req.requester_id"] person_id = raw["req_req.requester_id"] person_url = URL(c="pr", f="person", args=[person_id]) person = A(person, _href=person_url, ) # Avatar # Try Organisation Logo db = current.db otable = db.org_organisation row = db(otable.id == organisation_id).select(otable.logo, limitby=(0, 1) ).first() if row and row.logo: logo = URL(c="default", f="download", args=[row.logo]) avatar = IMG(_src=logo, _height=50, _width=50, _style="padding-right:5px;", _class="media-object") avatar = A(avatar, _href=org_url, _class="pull-left", ) else: # Personal Avatar avatar = s3_avatar_represent(person_id, tablename="pr_person", _class="media-object") avatar = A(avatar, _href=person_url, _class="pull-left", ) # Edit Bar T = current.T auth = current.auth permit = auth.s3_has_permission table = db.req_req if permit("update", table, record_id=record_id): edit_btn = A(I(" ", _class="icon icon-edit"), _href=URL(c="req", 
f="req", args=[record_id, "update.popup"], vars={"refresh": list_id, "record": record_id}), _class="s3_modal", _title=T("Edit Request"), ) else: edit_btn = "" if permit("delete", table, record_id=record_id): delete_btn = A(I(" ", _class="icon icon-trash"), _class="dl-item-delete", ) else: delete_btn = "" edit_bar = DIV(edit_btn, delete_btn, _class="edit-bar fright", ) s3db = current.s3db site = record["req_req.site_id"] site_id = raw["req_req.site_id"] table = s3db.org_facility facility = db(table.site_id == site_id).select(table.id, limitby=(0, 1) ).first() if facility: site_url = URL(c="org", f="facility", args=[facility.id, "profile"]) opts = dict(_href=site_url) site_comments = raw["org_site.comments"] or "" if site_comments: opts["_class"] = "s3-popover" opts["_data-toggle"] = "popover" opts["_data-content"] = site_comments site_link = A(site, **opts) card_title = TAG[""](I(_class="icon icon-request"), SPAN(site_link, _class="card-title")) else: card_title = TAG[""](I(_class="icon icon-request"), SPAN(" ", _class="card-title")) #if priority == 3: # # Apply additional highlighting for High Priority # item_class = "%s disaster" % item_class # Tallies # NB We assume that all records are readable here table = s3db.req_commit query = (table.deleted == False) & \ (table.req_id == record_id) tally_commits = db(query).count() #if permit("create", table): if auth.is_logged_in(): _class="s3_modal btn" commit_url = URL(c="req", f="commit", args=["create.popup"], vars={"req_id": record_id, "refresh": list_id, "record": record_id, }, ) else: _class="btn" next = "/%s/req/commit/create?req_id=%s" % (current.request.application, record_id) commit_url = URL(c="default", f="user", args="login", vars={"_next": next, }, ) commit_btn = A(I(" ", _class="icon icon-truck"), " ", T("DONATE"), _href=commit_url, _class=_class, _title=T("Donate to this Request"), ) # Render the item item = DIV(DIV(card_title, SPAN(A(location, _href=location_url, ), _class="location-title", ), SPAN(date, 
_class="date-title", ), edit_bar, _class="card-header", ), DIV(avatar, DIV(DIV(SPAN(body, _class="s3-truncate"), DIV(person, " - ", A(organisation, _href=org_url, _class="card-organisation", ), _class="card-person", ), _class="media pull-left", ), DIV(P(A(T("Donations"), _href=URL(c="req", f="req", args=[record_id, "profile"], ), ), SPAN(tally_commits, _class="badge", ), _class="tally", ), commit_btn, _class="media pull-right", ), _class="media-body", ), _class="media", ), #docs, _class=item_class, _id=item_id, ) return item # ============================================================================= def req_customize_commit_fields(): """ Customize req_commit fields for the Home page & dataList view """ # Truncate comments field from s3.s3utils import s3_trunk8 s3_trunk8(lines=2) T = current.T s3db = current.s3db s3 = current.response.s3 settings = current.deployment_settings tablename = "req_commit" table = s3db.req_commit list_fields = [#"req_id", # populated automatically or not at all? 
"organisation_id", "committer_id", "comments", "date_available", # We'd like to be able to map donations, but harder for users to enter data #"location_id", ] if settings.get_req_commit_value(): list_fields += ["value", "currency", ] request = current.request args = request.args if "create.popup" in args or \ "create" in args: req_id = request.get_vars.get("req_id", None) if req_id: table.req_id.default = req_id elif not settings.get_req_commit_without_request(): current.session.error = T("Not allowed to Donate without matching to a Request!") redirect(URL(c="req", f="req", args=["datalist"])) elif "update.popup" in args or \ "update" in args: list_fields.append("cancel") # CRUD strings #ADD_COMMIT = T("Make Donation") ADD_COMMIT = T("Add Donation") s3.crud_strings[tablename] = Storage( label_create = ADD_COMMIT, title_display = T("Donation Details"), title_list = T("Donations"), title_update = T("Edit Donation"), label_list_button = T("List Donations"), label_delete_button = T("Delete Donation"), msg_record_created = T("Donation Added"), msg_record_modified = T("Donation Updated"), msg_record_deleted = T("Donation Canceled"), msg_list_empty = T("No Donations")) auth = current.auth # @ToDo: deployment_setting if auth.s3_has_role("EDITOR"): editor = True else: editor = False field = table.committer_id if editor: field.requires = IS_ADD_PERSON_WIDGET2() field.widget = S3AddPersonWidget2(controller="pr") field.default = None else: field.writable = False #field = table.location_id #field.represent = s3db.gis_LocationRepresent(sep=" | ") # Required #field.requires = IS_LOCATION_SELECTOR2() field = table.comments field.label = T("Donation") field.represent = lambda body: XML(s3_URLise(body)) field.required = True # @ToDo field.comment = None table.date_available.default = current.request.utcnow field = table.organisation_id field.readable = True field.comment = S3AddResourceLink(c="org", f="organisation_id", title=T("Create Organization"), ) if 
settings.get_org_autocomplete(): # Enable if there are many Orgs field.widget = S3OrganisationAutocompleteWidget() if editor: # Editor can select Org field.writable = True crud_form = S3SQLCustomForm(*list_fields) elif auth.user and auth.user.organisation_id: field.default = auth.user.organisation_id field.writable = False crud_form = S3SQLCustomForm(*list_fields) else: # Only a User representing an Org can commit for an Org field.default = None field.writable = False crud_fields = [f for f in list_fields if f != "organisation_id"] crud_form = S3SQLCustomForm(*crud_fields) filter_widgets = [ S3TextFilter(["committer_id$first_name", "committer_id$middle_name", "committer_id$last_name", "site_id$name", "comments", "req_id$name", "organisation_id$name" ], label = T("Search"), comment=T("Search for a commitment by Committer name, Request ID, Site or Organization."), ), S3LocationFilter("location_id", widget="multiselect", hidden=True, ), #S3DateFilter("date", # label=T("Date"), # hide_time=True, # input_labels = {"ge": "From", "le": "To"}, # comment=T("Search for commitments made between these dates."), # hidden=True, # ), S3DateFilter("date_available", label=T("Date Available"), hide_time=True, input_labels = {"ge": "From", "le": "To"}, comment=T("Search for commitments available between these dates."), hidden=True, ), ] # Return to Requests view after create/update/delete (unless done via Modal) url_next = URL(c="req", f="req", args="datalist") s3db.configure(tablename, create_next = url_next, crud_form = crud_form, delete_next = url_next, filter_formstyle = filter_formstyle, filter_widgets = filter_widgets, # We want the Create form to be in a modal, not inline, for consistency listadd = False, list_fields = list_fields, list_layout = req_commit_list_layout, update_next = url_next, ) return table # ============================================================================= def req_commit_list_layout(list_id, item_id, resource, rfields, record): """ Default 
dataList item renderer for Commits on the Home page & dataList view @param list_id: the HTML ID of the list @param item_id: the HTML ID of the item @param resource: the S3Resource to render @param rfields: the S3ResourceFields to render @param record: the record as dict """ record_id = record["req_commit.id"] item_class = "thumbnail" raw = record._row date = record["req_commit.date_available"] body = record["req_commit.comments"] title = "" #location = record["req_commit.location_id"] #location_id = raw["req_commit.location_id"] #location_url = URL(c="gis", f="location", args=[location_id, "profile"]) person = record["req_commit.committer_id"] person_id = raw["req_commit.committer_id"] person_url = URL(c="pr", f="person", args=[person_id]) person = A(person, _href=person_url, ) organisation_id = raw["req_commit.organisation_id"] if organisation_id: organisation = record["req_commit.organisation_id"] org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"]) organisation = A(organisation, _href=org_url, _class="card-organisation", ) organisation = TAG[""](" - ", organisation) # Use Organisation Logo # @ToDo: option for Personal Avatar (fallback if no Org Logo?) 
db = current.db otable = db.org_organisation row = db(otable.id == organisation_id).select(otable.logo, limitby=(0, 1) ).first() if row and row.logo: logo = URL(c="default", f="download", args=[row.logo]) else: logo = URL(c="static", f="img", args="blank-user.gif") avatar = IMG(_src=logo, _height=50, _width=50, _style="padding-right:5px;", _class="media-object") avatar = A(avatar, _href=org_url, _class="pull-left", ) else: organisation = "" # Personal Avatar avatar = s3_avatar_represent(person_id, tablename="pr_person", _class="media-object") avatar = A(avatar, _href=person_url, _class="pull-left", ) # Edit Bar permit = current.auth.s3_has_permission table = current.s3db.req_commit if permit("update", table, record_id=record_id): edit_btn = A(I(" ", _class="icon icon-edit"), _href=URL(c="req", f="commit", args=[record_id, "update.popup"], vars={"refresh": list_id, "record": record_id}), _class="s3_modal", _title=current.T("Edit Donation"), ) else: edit_btn = "" if permit("delete", table, record_id=record_id): delete_btn = A(I(" ", _class="icon icon-trash"), _class="dl-item-delete", ) else: delete_btn = "" edit_bar = DIV(edit_btn, delete_btn, _class="edit-bar fright", ) card_label = TAG[""](I(_class="icon icon-offer"), SPAN(" %s" % title, _class="card-title")) # Render the item item = DIV(DIV(card_label, #SPAN(A(location, # _href=location_url, # ), # _class="location-title", # ), SPAN(date, _class="date-title", ), edit_bar, _class="card-header", ), DIV(avatar, DIV(DIV(SPAN(body, _class="s3-truncate"), DIV(person, organisation, _class="card-person", ), _class="media", ), _class="media-body", ), _class="media", ), #docs, _class=item_class, _id=item_id, ) return item # ----------------------------------------------------------------------------- def filter_formstyle(row_id, label, widget, comment, hidden=False): """ Custom Formstyle for FilterForm @param row_id: HTML id for the row @param label: the label @param widget: the form widget @param comment: the comment 
@param hidden: whether the row should initially be hidden or not """ if hidden: _class = "advanced hide" else: _class= "" if not label: label = "" if comment: if current.response.s3.rtl: dir = "fleft" else: dir = "fright" comment = DIV(_class = "tooltip %s" % dir, _title = "%s|%s" % (label[0][:-1], comment), ) else: comment = "" return DIV(label, widget, comment, _id=row_id, _class=_class, ) # END =========================================================================
en
0.549571
# -*- coding: utf-8 -*- Sahana Eden Request Model @copyright: 2009-2013 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # ============================================================================= # Multiple Item/Skill Types per Request? # # Number hardcoded in controller & JS #if settings.has_module("asset") and "Asset" in req_types_deployed: # req_type_opts[2] = T("Assets") #if settings.has_module("cr") and "Shelter" in req_types_deployed: # req_type_opts[4] = T("Shelter") # Dropdown or Autocomplete? 
# --------------------------------------------------------------------- # Requests # @ToDo: Replace with Link Table # Hours, so 1 year #represent="date", #widget="date", #@ToDo: Colour code the priority text - red, orange, green #represent = self.req_priority_represent, # This is a component, so needs to be a super_link # - can't override field name, ondelete or requires #Field("location", # label = T("Neighborhood")), # Donations: What will the Items be used for?; People: Task Details # Only-needed for summary mode (unused) #represent = self.req_purpose_represent, # Allow time for people to fill out form # Hours, so 1 year #represent="date", #widget="date", # Hours, so 1 year #writable = False, # This field should be in req_commit, but that complicates the UI #default = auth.s3_logged_in_person() # Could be T("Date Delivered") - make deployment_setting # Hours, so 1 year # @ToDo: Set this in Update forms? Dedicated 'Receive' button? # (Definitely not in Create forms) #default = auth.s3_logged_in_person() # Simple Status # - currently just enabled in customize_req_fields() workflow # Detailed Status # @todo: make lazy_table # CRUD strings # Which levels of Hierarchy are we using? #S3TextFilter(["committer_id$first_name", # "committer_id$middle_name", # "committer_id$last_name", # "site_id$name", # "comments", # "req_id$name", # "organisation_id$name" # ], # label = T("Search") # comment=T("Search for a commitment by Committer name, Request ID, Site or Organization."), # ), #"site_id$location_id$L1", #"site_id$location_id$L2", # @ToDo: id gets stripped in _select_field # Reusable Field #"event_id", # @ToDo: Vary by deployment_setting (easy) # @ToDo: Allow a single column to support different components based on type # @ToDo: Include Qty too (Computed VF in component?) 
#(T("Items"), "item.item_id"), #(T("Skills"), "skill.skill_id"), #if len(settings.get_req_req_type()) > 1: # list_fields.append("type") # @ToDo: Deprecate with type-based components (see above) # Custom Methods # Print Forms # Components # Documents # Requested Items # Requested Skills # Commitment # Item Categories # Scheduler Jobs (for recurring requests) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # ------------------------------------------------------------------------- Safe defaults for model-global names in case module is disabled # ------------------------------------------------------------------------- Function to be called from REST prep functions - main module & components (sites & events) # Hide fields which don't make sense in a Create form # Filter the list of Contacts to those for the site S3OptionsFilter({ 'triggerName':'site_id', 'targetName':'requester_id', 'lookupResource':'staff', 'lookupURL':S3.Ap.concat('/hrm/staff_for_site/'), 'msgNoRecords':'%s', 'optional':true, }) #table.site_id.comment = A(T("Set as default Site"), # _id="req_req_site_id_link", # _target="_blank", # _href=URL(c="default", # f="user", # args=["profile"])) # Show the Required Until Field # (gets turned-off by JS for other types) # Script to inject into Pages which include Request create forms i18n.req_purpose="%s" i18n.req_site_id="%s" i18n.req_request_for_id="%s" i18n.req_recv_by_id="%s" i18n.req_items_purpose="%s" i18n.req_items_site_id="%s" i18n.req_items_recv_by_id="%s" i18n.req_people_purpose="%s" i18n.req_people_site_id="%s" i18n.req_people_recv_by_id="%s" i18n.req_next_msg="%s" i18n.req_other_msg="%s" i18n.req_details_mandatory="%s" # ------------------------------------------------------------------------- Function to be called from REST prep functions - to add req_item & req_skill components as inline forms # Dropdown not Autocomplete S3OptionsFilter({ 'triggerName':'item_id', 
'targetName':'item_pack_id', 'lookupPrefix':'supply', 'lookupResource':'item_pack', 'msgNoRecords':i18n.no_packs, 'fncPrep':S3.supply.fncPrepItem, 'fncRepresent':S3.supply.fncRepresentItem }) # Custom Form # Filter the list of Contacts to those for the site S3OptionsFilter({ 'triggerName':'site_id', 'targetName':'requester_id', 'lookupResource':'staff', 'lookupURL':S3.Ap.concat('/hrm/staff_for_site/'), 'msgNoRecords':'%s', 'optional':true, }) # Custom Form # Custom Form # Filter the list of Contacts to those for the site S3OptionsFilter({ 'triggerName':'site_id', 'targetName':'requester_id', 'lookupResource':'staff', 'lookupURL':S3.Ap.concat('/hrm/staff_for_site/'), 'msgNoRecords':'%s', 'optional':true, }) # ------------------------------------------------------------------------- Function to be called from REST prep functions - main module & components (sites) # ------------------------------------------------------------------------- Represent a Request # ------------------------------------------------------------------------- Represet the Commitment Status of the Request # Include the Site Name of the Committer if we can # @ToDo: figure out how! 
# ------------------------------------------------------------------------- Represent for the Request Reference if show_link is True then it will generate a link to the record if pdf is True then it will generate a link to the PDF # ------------------------------------------------------------------------- Generate a PDF of a Request Form # Not Supported - redirect to normal PDF #pdf_footer = inv_recv_pdf_footer, # ------------------------------------------------------------------------- Custom Method to copy an existing Request - creates a req with req_item records # Make a copy of the request record # Make a copy of each child record # Items # People and skills # ------------------------------------------------------------------------- Custom Method to commit to a Request - creates a commit with commit_items for each req_item # Check if there is an existing Commitment # Browse existing commitments # Create the commitment # Items # Mark Item in the Request as Committed # Mark Request as Committed # People # Mark Item in the Request as Committed # Mark Request as Committed # Other # Mark Request as Committed # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- Hide the Update Quantity Status Fields from Request create forms # ------------------------------------------------------------------------- Add a set of Tabs for a Site's Request Tasks @ToDo: Roll these up like inv_tabs in inv.py # ------------------------------------------------------------------------- Check to see if your Inventory can be used to match any open Requests # Get req_items & inv_items from this site # VF #itable.pack_quantity, # Convert inv item quantity to req item quantity #A(req_item.id), # This requires an action btn to get the req_id #req_quantity_represent(req_item.quantity_commit, "commit"), #req_quantity_represent(req_item.quantity_fulfil, "fulfil"), 
#req_quantity_represent(req_item.quantity_transit, "transit"), #A(req_item.id), # This requires an action btn to get the req_id #req_quantity_represent(req_item.quantity_fulfil, "fulfil"), #req_quantity_represent(req_item.quantity_transit, "transit"), #s3.actions = [req_item_inv_item_btn] # pag won't work # ------------------------------------------------------------------------- After DB I/O # If the req_ref is None then set it up # Translate Simple Status # read current status # If the requester has no HR record, then create one # Check that the Request site belongs to this Org # @ToDo: Think about branches # Set the HR record as being for this site # Lookup the Org for the site # Is there already a site_contact for this site? # Configure the next page to go to based on the request type # ------------------------------------------------------------------------- Cleanup any scheduled tasks # ------------------------------------------------------------------------- This callback will be called when importing records it will look to see if the record being imported is a duplicate. 
@param job: An S3ImportJob object which includes all the details of the record being imported If the record is a duplicate then it will set the job method to update Rules for finding a duplicate: - If the Request Number exists then it's a duplicate # ============================================================================= # ----------------------------------------------------------------- # Request Items # # @ToDo: Move this into a Currency Widget for the pack_value field # @todo: make lazy_table # CRUD strings # Reusable Field S3OptionsFilter({ 'triggerName':'req_item_id', 'targetName':'item_pack_id', 'lookupResource':'item_pack', 'lookupPrefix':'supply', 'lookupURL':S3.Ap.concat('/req/req_item_packs/'), 'msgNoRecords':i18n.no_packs, 'fncPrep':S3.supply.fncPrepItem, 'fncRepresent':S3.supply.fncRepresentItem }) # Shows the inventory items which match a requested item # @ToDo: Make this page a component of req_item #"L1", #"L2", # --------------------------------------------------------------------- # # Req <> Item Category link table # # - used to provide a search filter # - populated onaccept/ondelete of req_item # # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # ------------------------------------------------------------------------- Safe defaults for model-global names in case module is disabled # ------------------------------------------------------------------------- Represent a Request Item # @ToDo: Optimised query where we don't need to do the join # --------------------------------------------------------------------- call the generic quantity represent # --------------------------------------------------------------------- call the generic quantity represent # --------------------------------------------------------------------- call the generic quantity represent # --------------------------------------------------------------------- @ToDo: There should be better control of this 
feature - currently this only works with req_items which are being matched by commit / send / recv # ------------------------------------------------------------------------- Update the # ------------------------------------------------------------------------- This callback will be called when importing records it will look to see if the record being imported is a duplicate. @param job: An S3ImportJob object which includes all the details of the record being imported If the record is a duplicate then it will set the job method to update Rules for finding a duplicate: - If the Request Number matches - The item is the same # ============================================================================= # ----------------------------------------------------------------- # Request Skills # # Make this a Component #Field("task", # readable=False, # writable=False, # Populated from req_req 'Purpose' # label = T("Task Details")), # @ToDo: Add a minimum competency rating? #represent = lambda quantity_transit: \ # req_quantity_represent(quantity_transit, # "transit"), #label = T("Task Details"), #comment = DIV(_class="tooltip", # _title="%s|%s" % (T("Task Details"), # T("Include any special requirements such as equipment which they need to bring."))) # @todo: make lazy_table # CRUD strings # @ToDo: Activate based on a deployment_setting #"task", # Filter Widgets #"L1", #"L2", # Configuration # @ToDo: Produce a custom controller like req_item_inv_item? 
#create_next = URL(c="req", f="req_skill_skill", # args=["[id]"]), # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # ----------------------------------------------------------------- Used in controllers/req.py commit() # ============================================================================= # ----------------------------------------------------------------- # Request Job # # Jobs for Scheduling Recurring Requests # # CRUD Strings # Resource Configuration # Resource Configuration # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # ------------------------------------------------------------------------- De-duplicate Recurring Request Jobs # ============================================================================= Simple Requests Management System - Organisations can request Money or Time from remote volunteers - Sites can request Time from local volunteers or accept drop-off for Goods # ----------------------------------------------------------------- # Summary of Needs for an Organisation # # CRUD strings # ----------------------------------------------------------------- # Summary of Needs for a site # #s3_comments("needs", # label=T("Needs"), # comment=None, # widget=S3PriorityListWidget(), # ), # CRUD strings # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # ============================================================================= Link Requests for Skills to Tasks #T = current.T # ----------------------------------------------------------------- # Link Skill Requests to Tasks # #self.req_req_person_id(), #self.req_req_skill_id(), # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # ============================================================================= # Dropdown or Autocomplete? 
# --------------------------------------------------------------------- # Commitments (Pledges) # Non-Item Requests make False in the prep # Used for reporting on where Donations originated # Non-Item Requests make True in the prep # @ToDo: deployment_setting for whether this can be empty # These are copied automatically from the Req # @ToDo: Calculate this from line items in Item Commits # @ToDo: Move this into a Currency Widget for the value field # Which levels of Hierarchy are we using? # CRUD strings # Reusable Field #"site": "site_id", # @ToDo: Vary by deployment_setting (easy) # @ToDo: Allow a single column to support different components based on type # @ToDo: Include Qty too (Computed VF in component?) #(T("Committed People"), "commit_person.person_id"), #(T("Committed Skills"), "commit_skill.skill_id"), # Commitments should only be made to a specific request # Components # Committed Items # Committed Persons # Committed Skills # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # ------------------------------------------------------------------------- Represent a Commit # Items # ------------------------------------------------------------------------- Copy the request_type to the commitment # ------------------------------------------------------------------------- Update Status of Request & components # @ToDo: Will this always be in vars? 
# Set location_id to location of site # Find the request # Items # Update Commit Status for Items in the Request # Get the full list of items in the request # Virtual Field #ritable.pack_quantity, # Get all Commits in-system # Virtual Field #citable.pack_quantity, # Update overall Request Status # People ## If this is a single person commitment, then create the commit_person record automatically #table = s3db.req_commit_person #table.insert(commit_id = id, # #skill_id = ???, # person_id = auth.s3_logged_in_person()) ## @ToDo: Mark Person's allocation status as 'Committed' # Update Commit Status for Skills in the Request # Get the full list of skills in the request # @ToDo: Breakdown to component Skills within multi # Get all Commits in-system # Update overall Request Status # Other # Assume Partial not Complete # @ToDo: Provide a way for the committer to specify this # Show as 'Responded' # ------------------------------------------------------------------------- Update Status of Request & components # Find the request # Items # Update Commit Status for Items in the Request # Get the full list of items in the request # Virtual Field #ritable.pack_quantity, # Get all Commits in-system # - less those from this commit # Virtual Field #citable.pack_quantity, # Update overall Request Status # People ## If this is a single person commitment, then create the commit_person record automatically #table = s3db.req_commit_person #table.insert(commit_id = vars.id, # #skill_id = ???, # person_id = auth.s3_logged_in_person()) ## @ToDo: Mark Person's allocation status as 'Committed' # Update Commit Status for Skills in the Request # Get the full list of skills in the request # Get all Commits in-system # - less those from this commit # Update overall Request Status # Other # Assume Complete not partial # @ToDo: Provide a way for the committer to specify this # ============================================================================= # 
----------------------------------------------------------------- # Commitment Items # @ToDo: Update the req_item_id in the commit_item if the req_id of the commit is changed #item_id, #supply_item_id(), # CRUD strings # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # Used by commit_req() controller # ------------------------------------------------------------------------- Update the Commit Status for the Request Item & Request # Get the req_id # Get the full list of items in the request # Virtual Field #ritable.pack_quantity, # Get all Commits in-system # Virtual Field #citable.pack_quantity, # Update overall Request Status # ------------------------------------------------------------------------- Create a Shipment containing all items in a Commitment # Get the commit record # @ToDo: Identify if we have stock items which match the commit items # If we have a single match per item then proceed automatically (as-now) & then decrement the stock quantity # If we have no match then warn the user & ask if they should proceed anyway # If we have mulitple matches then provide a UI to allow the user to select which stock items to use # Create an inv_send and link to the commit # Get all of the committed items # Create inv_track_items for each commit item # Now done as a VirtualField instead (looks better & updates closer to real-time, so less of a race condition) #quantity_shipped = max(rim.quantity_transit, rim.quantity_fulfil) #quantity_needed = rim.quantity - quantity_shipped #req_quantity = quantity_needed, # Create the Waybill # Redirect to inv_send for the send id just created #c = "inv", or "req" #args = [send_id, "track_item"] # ============================================================================= Commit a named individual to a Request # ----------------------------------------------------------------- # Committed Persons # # For reference # This should be person not hrm as we want to mark 
them as allocated # CRUD strings # @ToDo: Fix this before enabling #self.configure(tablename, # onaccept = self.commit_person_onaccept) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # ------------------------------------------------------------------------- Not working # Try to get req_skill_id from the form # Update status_commit of the req record # ============================================================================= Commit anonymous people to a Request # ----------------------------------------------------------------- # Committed Skills # # CRUD strings # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # # ------------------------------------------------------------------------- Update the Commit Status for the Request Skill & Request # Get the req_id # Get the full list of skills in the request # Get all Commits in-system # Update overall Request Status # ============================================================================= Update Request Status Update req_item_category link table # Update Request Status # Update req_item_category link table # ============================================================================= # Check if we have other req_items in the same category # Delete req_item_category link table # ============================================================================= Update Request Status commit_status, transit_status, fulfil_status None => quantity = 0 for ALL items Partial => some items have quantity > 0 Complete => quantity_x = quantity(requested) for ALL items # Must check all items in the req # ============================================================================= Update req_req. 
commit_status, transit_status, fulfil_status None => quantity = 0 for ALL skills Partial => some skills have quantity > 0 Complete => quantity_x = quantity(requested) for ALL skills Create a Task for People to be assigned to #if record: # # Copy the Task description to the Skills component # db(query).update(task=record.purpose) # Must check all skills in the req # Add a Task to which the People can be assigned # Get the request record # Add the Request as a Component to the Task # ============================================================================= Show the requested items/skills # ============================================================================= Show the driver(s) details # ============================================================================= Resource Header for Requests # ============================================================================= Function to be called from controller functions to display all requests as a tab for a site. # Pre-process # Plugin OrgRoleManager # Post-process # ============================================================================= RESTful method to reset a job status from FAILED to QUEUED, for "Reset" action button # ============================================================================= RESTful method to run a job now, for "Run Now" action button # args # vars # ============================================================================= Add a Request from a Template # Load Template # Copy across req_item # Copy across req_skill # ============================================================================= Customize req_req fields for the Home page & dataList view - this assumes Simple Requests (i.e. 
type 'Other') # Truncate purpose field #"priority", #"is_template", # Other # Make mandatory # Lookup Site Contact var fieldname='req_req_requester_id' var real_input=$('#'+fieldname) $.when(S3.addPersonWidgetReady(fieldname)).then( function(status){real_input.data('lookup_contact')(fieldname,%s)}, function(status){s3_debug(status)}, function(status){s3_debug(status)}) # If the Requester is blank, then lookup default Site Contact $('#req_req_site_id').change(function(){ var site_id=$(this).val() if(site_id){ var fieldname='req_req_requester_id' var real_input=$('#'+fieldname) if(!real_input.val()&&!$('#req_req_requester_id_full_name').val()){ real_input.data('lookup_contact')(fieldname,site_id) }}}) # Restrict to Sites belonging to this Org # @ToDo: Handle Branches # No need to use Site Autocomplete in this case #site_represent = s3db.org_SiteRepresent(show_link=False, # show_type=False) # Which levels of Hierarchy are we using? #"comments", #S3OptionsFilter("transit_status", # label = T("Transit Status"), # options = s3db.req_status_opts, # cols = 3, # ), #S3OptionsFilter("fulfil_status", # label = T("Fulfill Status"), # options = s3db.req_status_opts, # cols = 3, # ), #hidden=True, #S3DateFilter("date_required", # label=T("Date Needed By"), # hide_time=True, # input_labels = {"ge": "From", "le": "To"}, # comment=T("Search for requests required between these dates."), # hidden=True, # ), # @ToDo: deployment_setting # Return to Requests view after create/update/delete (unless done via Modal) # We want the Create form to be in a modal, not inline, for consistency # ============================================================================= Default dataList item renderer for Requests on the Home page & dataList view @param list_id: the HTML ID of the list @param item_id: the HTML ID of the item @param resource: the S3Resource to render @param rfields: the S3ResourceFields to render @param record: the record as dict # Avatar # Try Organisation Logo # Personal Avatar 
# Edit Bar #if priority == 3: # # Apply additional highlighting for High Priority # item_class = "%s disaster" % item_class # Tallies # NB We assume that all records are readable here #if permit("create", table): # Render the item #docs, # ============================================================================= Customize req_commit fields for the Home page & dataList view # Truncate comments field #"req_id", # populated automatically or not at all? # We'd like to be able to map donations, but harder for users to enter data #"location_id", # CRUD strings #ADD_COMMIT = T("Make Donation") # @ToDo: deployment_setting #field = table.location_id #field.represent = s3db.gis_LocationRepresent(sep=" | ") # Required #field.requires = IS_LOCATION_SELECTOR2() # @ToDo # Enable if there are many Orgs # Editor can select Org # Only a User representing an Org can commit for an Org #S3DateFilter("date", # label=T("Date"), # hide_time=True, # input_labels = {"ge": "From", "le": "To"}, # comment=T("Search for commitments made between these dates."), # hidden=True, # ), # Return to Requests view after create/update/delete (unless done via Modal) # We want the Create form to be in a modal, not inline, for consistency # ============================================================================= Default dataList item renderer for Commits on the Home page & dataList view @param list_id: the HTML ID of the list @param item_id: the HTML ID of the item @param resource: the S3Resource to render @param rfields: the S3ResourceFields to render @param record: the record as dict #location = record["req_commit.location_id"] #location_id = raw["req_commit.location_id"] #location_url = URL(c="gis", f="location", args=[location_id, "profile"]) # Use Organisation Logo # @ToDo: option for Personal Avatar (fallback if no Org Logo?) 
# Personal Avatar # Edit Bar # Render the item #SPAN(A(location, # _href=location_url, # ), # _class="location-title", # ), #docs, # ----------------------------------------------------------------------------- Custom Formstyle for FilterForm @param row_id: HTML id for the row @param label: the label @param widget: the form widget @param comment: the comment @param hidden: whether the row should initially be hidden or not # END =========================================================================
1.24843
1
api/tests/integration/tests/basic/molfile_stereoparity.py
f1nzer/Indigo
0
6629783
import sys sys.path.append("../../common") from env_indigo import * indigo = Indigo() indigo.setOption("molfile-saving-skip-date", "1") print( "This test checks correct saving of parity values for stereocenters for atoms" ) print( "For molfile 3000 format parity value is stored after an atom with CFG=val" ) print( "For molfile 2000 format parity value is stored in the 3rd value after an atom label" ) print("Molecules:") for mol, num in zip( indigo.iterateSDFile(joinPathPy("molecules/stereo_parity.sdf", __file__)), range(100000), ): print("\n*** Molecule #%d ***" % (num + 1)) saving_modes = ["2000", "3000", "auto"] for mode in saving_modes: indigo.setOption("molfile-saving-mode", mode) print("molfile-saving-mode = %s" % (mode)) print(mol.molfile()) print("Query molecules:") fnames = ["molecules/stereo_parity_query.sdf", "molecules/stereo_parity.sdf"] for f in fnames: for mol, num in zip( indigo.iterateSDFile(joinPathPy(f, __file__)), range(100000) ): qmol = indigo.loadQueryMolecule(mol.rawData()) print("\n*** Query molecule #%d ***" % (num + 1)) saving_modes = ["2000", "3000", "auto"] for mode in saving_modes: indigo.setOption("molfile-saving-mode", mode) print("molfile-saving-mode = %s" % (mode)) print(qmol.molfile())
import sys sys.path.append("../../common") from env_indigo import * indigo = Indigo() indigo.setOption("molfile-saving-skip-date", "1") print( "This test checks correct saving of parity values for stereocenters for atoms" ) print( "For molfile 3000 format parity value is stored after an atom with CFG=val" ) print( "For molfile 2000 format parity value is stored in the 3rd value after an atom label" ) print("Molecules:") for mol, num in zip( indigo.iterateSDFile(joinPathPy("molecules/stereo_parity.sdf", __file__)), range(100000), ): print("\n*** Molecule #%d ***" % (num + 1)) saving_modes = ["2000", "3000", "auto"] for mode in saving_modes: indigo.setOption("molfile-saving-mode", mode) print("molfile-saving-mode = %s" % (mode)) print(mol.molfile()) print("Query molecules:") fnames = ["molecules/stereo_parity_query.sdf", "molecules/stereo_parity.sdf"] for f in fnames: for mol, num in zip( indigo.iterateSDFile(joinPathPy(f, __file__)), range(100000) ): qmol = indigo.loadQueryMolecule(mol.rawData()) print("\n*** Query molecule #%d ***" % (num + 1)) saving_modes = ["2000", "3000", "auto"] for mode in saving_modes: indigo.setOption("molfile-saving-mode", mode) print("molfile-saving-mode = %s" % (mode)) print(qmol.molfile())
de
0.122536
#%d ***" % (num + 1)) #%d ***" % (num + 1))
2.082048
2
33/portscanner_threads.py
tonybaloney/cpython-book-samples
160
6629784
from threading import Thread from queue import Queue import socket import time timeout = 1.0 def check_port(host: str, port: int, results: Queue): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(timeout) result = sock.connect_ex((host, port)) if result == 0: results.put(port) sock.close() def main(): start = time.time() host = "localhost" threads = [] results = Queue() for port in range(80, 100): t = Thread(target=check_port, args=(host, port, results)) t.start() threads.append(t) for t in threads: t.join() while not results.empty(): print("Port {0} is open".format(results.get())) print("Completed scan in {0} seconds".format(time.time() - start)) if __name__ == '__main__': main()
from threading import Thread from queue import Queue import socket import time timeout = 1.0 def check_port(host: str, port: int, results: Queue): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(timeout) result = sock.connect_ex((host, port)) if result == 0: results.put(port) sock.close() def main(): start = time.time() host = "localhost" threads = [] results = Queue() for port in range(80, 100): t = Thread(target=check_port, args=(host, port, results)) t.start() threads.append(t) for t in threads: t.join() while not results.empty(): print("Port {0} is open".format(results.get())) print("Completed scan in {0} seconds".format(time.time() - start)) if __name__ == '__main__': main()
none
1
3.113801
3
utils/lib/deps.py
Laksen/symbiflow-arch-defs
0
6629785
#!/usr/bin/env python3 """ This file needs to be kept in sync with ../../make/deps.mk """ import os import os.path MY_DIR = os.path.dirname(os.path.abspath(__file__)) TOP_DIR = os.path.abspath(os.path.join(MY_DIR, "..", "..")) DEPS_DIR = ".deps" DEPS_EXT = ".d" DEPMK_EXT = ".dmk" def makefile_dir(filepath): """Get the directory part of a path in the same way make does. Python version of the makefile `$(dir xxx)` function. >>> makefile_dir("blah1") '' >>> makefile_dir("a/blah2") 'a/' >>> makefile_dir("../b/blah3") '../b/' >>> makefile_dir("./blah4") '' >>> makefile_dir("/abc/blah6") '/abc/' >>> makefile_dir("/blah5") '/' """ dirname = os.path.dirname(os.path.normpath(filepath)) if not dirname or dirname == '.': return '' if dirname[-1] != '/': dirname += '/' return dirname def makefile_notdir(filepath): """Get the non-directory part of a path in the same way make does. Python version of the makefile `$(nodir xxxx)` function. >>> makefile_notdir("blah1") 'blah1' >>> makefile_notdir("a/blah2") 'blah2' >>> makefile_notdir("../b/blah3") 'blah3' >>> makefile_notdir("blah4/") '' >>> makefile_notdir("/blah5") 'blah5' >>> makefile_notdir("/abc/blah6") 'blah6' """ return os.path.basename(filepath) def deps_dir(filepath, *, top_dir=TOP_DIR): """Get the directory to put dependencies files into. >>> td = os.path.abspath(os.curdir) >>> deps_dir("./a/blah", top_dir=td) '.deps/a/blah' >>> deps_dir("blah", top_dir=td) '.deps/blah' >>> deps_dir("blah.abc", top_dir=td) '.deps/blah.abc' >>> deps_dir("/abc3/blah", top_dir='/abc3') '.deps/blah' >>> deps_dir("/abc3/blah", top_dir='/abc4') Traceback (most recent call last): ... 
OSError: /abc3/blah is not inside top /abc4 """ filepath = os.path.normpath(filepath) if filepath[0] != '/': filepath = os.path.abspath(filepath) filepath_notop = filepath.replace(top_dir + '/', '') if filepath_notop == filepath: raise IOError("{} is not inside top {}".format(filepath, top_dir)) return "{deps_dir}/{dir}{notdir}".format( deps_dir=DEPS_DIR, dir=makefile_dir(filepath_notop), notdir=makefile_notdir(filepath_notop), ) def deps_makefile(filepath, *, top_dir=TOP_DIR): """Get deps makefile name. Python version of `$(call deps_makefile,{})` in make/deps.mk >>> td = os.path.abspath(os.curdir) >>> deps_makefile("./a/blah", top_dir=td) '.deps/a/blah.dmk' >>> deps_makefile("blah", top_dir=td) '.deps/blah.dmk' """ return deps_dir( "{dir}{notdir}{ext}".format( dir=makefile_dir(filepath), notdir=makefile_notdir(filepath), ext=DEPMK_EXT, ), top_dir=top_dir) def add_dependency(f, from_file, on_file): """Record a dependency from file on file.""" f.write(""" $(call add_dependency,{from_file},{on_file}) """.format( from_file=from_file, on_file=on_file, )) def write_deps(inputfile_name, data): deps_filename = deps_makefile(inputfile_name) with open(deps_filename, "w") as f: f.write(data.getvalue()) print("Generated dependency info", deps_filename) if __name__ == "__main__": import doctest doctest.testmod()
#!/usr/bin/env python3 """ This file needs to be kept in sync with ../../make/deps.mk """ import os import os.path MY_DIR = os.path.dirname(os.path.abspath(__file__)) TOP_DIR = os.path.abspath(os.path.join(MY_DIR, "..", "..")) DEPS_DIR = ".deps" DEPS_EXT = ".d" DEPMK_EXT = ".dmk" def makefile_dir(filepath): """Get the directory part of a path in the same way make does. Python version of the makefile `$(dir xxx)` function. >>> makefile_dir("blah1") '' >>> makefile_dir("a/blah2") 'a/' >>> makefile_dir("../b/blah3") '../b/' >>> makefile_dir("./blah4") '' >>> makefile_dir("/abc/blah6") '/abc/' >>> makefile_dir("/blah5") '/' """ dirname = os.path.dirname(os.path.normpath(filepath)) if not dirname or dirname == '.': return '' if dirname[-1] != '/': dirname += '/' return dirname def makefile_notdir(filepath): """Get the non-directory part of a path in the same way make does. Python version of the makefile `$(nodir xxxx)` function. >>> makefile_notdir("blah1") 'blah1' >>> makefile_notdir("a/blah2") 'blah2' >>> makefile_notdir("../b/blah3") 'blah3' >>> makefile_notdir("blah4/") '' >>> makefile_notdir("/blah5") 'blah5' >>> makefile_notdir("/abc/blah6") 'blah6' """ return os.path.basename(filepath) def deps_dir(filepath, *, top_dir=TOP_DIR): """Get the directory to put dependencies files into. >>> td = os.path.abspath(os.curdir) >>> deps_dir("./a/blah", top_dir=td) '.deps/a/blah' >>> deps_dir("blah", top_dir=td) '.deps/blah' >>> deps_dir("blah.abc", top_dir=td) '.deps/blah.abc' >>> deps_dir("/abc3/blah", top_dir='/abc3') '.deps/blah' >>> deps_dir("/abc3/blah", top_dir='/abc4') Traceback (most recent call last): ... 
OSError: /abc3/blah is not inside top /abc4 """ filepath = os.path.normpath(filepath) if filepath[0] != '/': filepath = os.path.abspath(filepath) filepath_notop = filepath.replace(top_dir + '/', '') if filepath_notop == filepath: raise IOError("{} is not inside top {}".format(filepath, top_dir)) return "{deps_dir}/{dir}{notdir}".format( deps_dir=DEPS_DIR, dir=makefile_dir(filepath_notop), notdir=makefile_notdir(filepath_notop), ) def deps_makefile(filepath, *, top_dir=TOP_DIR): """Get deps makefile name. Python version of `$(call deps_makefile,{})` in make/deps.mk >>> td = os.path.abspath(os.curdir) >>> deps_makefile("./a/blah", top_dir=td) '.deps/a/blah.dmk' >>> deps_makefile("blah", top_dir=td) '.deps/blah.dmk' """ return deps_dir( "{dir}{notdir}{ext}".format( dir=makefile_dir(filepath), notdir=makefile_notdir(filepath), ext=DEPMK_EXT, ), top_dir=top_dir) def add_dependency(f, from_file, on_file): """Record a dependency from file on file.""" f.write(""" $(call add_dependency,{from_file},{on_file}) """.format( from_file=from_file, on_file=on_file, )) def write_deps(inputfile_name, data): deps_filename = deps_makefile(inputfile_name) with open(deps_filename, "w") as f: f.write(data.getvalue()) print("Generated dependency info", deps_filename) if __name__ == "__main__": import doctest doctest.testmod()
en
0.37242
#!/usr/bin/env python3 This file needs to be kept in sync with ../../make/deps.mk Get the directory part of a path in the same way make does. Python version of the makefile `$(dir xxx)` function. >>> makefile_dir("blah1") '' >>> makefile_dir("a/blah2") 'a/' >>> makefile_dir("../b/blah3") '../b/' >>> makefile_dir("./blah4") '' >>> makefile_dir("/abc/blah6") '/abc/' >>> makefile_dir("/blah5") '/' Get the non-directory part of a path in the same way make does. Python version of the makefile `$(nodir xxxx)` function. >>> makefile_notdir("blah1") 'blah1' >>> makefile_notdir("a/blah2") 'blah2' >>> makefile_notdir("../b/blah3") 'blah3' >>> makefile_notdir("blah4/") '' >>> makefile_notdir("/blah5") 'blah5' >>> makefile_notdir("/abc/blah6") 'blah6' Get the directory to put dependencies files into. >>> td = os.path.abspath(os.curdir) >>> deps_dir("./a/blah", top_dir=td) '.deps/a/blah' >>> deps_dir("blah", top_dir=td) '.deps/blah' >>> deps_dir("blah.abc", top_dir=td) '.deps/blah.abc' >>> deps_dir("/abc3/blah", top_dir='/abc3') '.deps/blah' >>> deps_dir("/abc3/blah", top_dir='/abc4') Traceback (most recent call last): ... OSError: /abc3/blah is not inside top /abc4 Get deps makefile name. Python version of `$(call deps_makefile,{})` in make/deps.mk >>> td = os.path.abspath(os.curdir) >>> deps_makefile("./a/blah", top_dir=td) '.deps/a/blah.dmk' >>> deps_makefile("blah", top_dir=td) '.deps/blah.dmk' Record a dependency from file on file. $(call add_dependency,{from_file},{on_file})
3.095222
3
jbank/migrations/0066_wsediconnection_pin.py
bachvtuan/django-jbank
0
6629786
<gh_stars>0 # Generated by Django 3.0.8 on 2020-07-17 23:22 from django.db import migrations import jutil.modelfields class Migration(migrations.Migration): dependencies = [ ("jbank", "0065_wsediconnection_bank_root_cert_file"), ] operations = [ migrations.AddField( model_name="wsediconnection", name="pin", field=jutil.modelfields.SafeCharField(blank=True, default="", max_length=64, verbose_name="PIN"), ), ]
# Generated by Django 3.0.8 on 2020-07-17 23:22 from django.db import migrations import jutil.modelfields class Migration(migrations.Migration): dependencies = [ ("jbank", "0065_wsediconnection_bank_root_cert_file"), ] operations = [ migrations.AddField( model_name="wsediconnection", name="pin", field=jutil.modelfields.SafeCharField(blank=True, default="", max_length=64, verbose_name="PIN"), ), ]
en
0.79023
# Generated by Django 3.0.8 on 2020-07-17 23:22
1.629836
2
ttt/game/admin.py
idegtiarov/ttt-wg
0
6629787
from django.contrib import admin from .models import Progress @admin.register(Progress) class ProgressAdmin(admin.ModelAdmin): pass
from django.contrib import admin from .models import Progress @admin.register(Progress) class ProgressAdmin(admin.ModelAdmin): pass
none
1
1.117385
1
rdmo/core/constants.py
m6121/rdmo
77
6629788
<gh_stars>10-100 from django.utils.translation import gettext_lazy as _ VALUE_TYPE_TEXT = 'text' VALUE_TYPE_URL = 'url' VALUE_TYPE_INTEGER = 'integer' VALUE_TYPE_FLOAT = 'float' VALUE_TYPE_BOOLEAN = 'boolean' VALUE_TYPE_DATETIME = 'datetime' VALUE_TYPE_OPTIONS = 'option' VALUE_TYPE_FILE = 'file' VALUE_TYPE_CHOICES = ( (VALUE_TYPE_TEXT, _('Text')), (VALUE_TYPE_URL, _('URL')), (VALUE_TYPE_INTEGER, _('Integer')), (VALUE_TYPE_FLOAT, _('Float')), (VALUE_TYPE_BOOLEAN, _('Boolean')), (VALUE_TYPE_DATETIME, _('Datetime')), (VALUE_TYPE_OPTIONS, _('Option')), (VALUE_TYPE_FILE, _('File')) ) PERMISSIONS = { 'condition': ( 'conditions.add_condition', 'conditions.change_condition', 'conditions.delete_condition' ), 'attribute': ( 'domain.add_attribute', 'domain.change_attribute', 'domain.delete_attribute' ), 'optionset': ( 'options.add_optionset', 'options.change_optionset', 'options.delete_optionset' ), 'option': ( 'options.add_option', 'options.change_option', 'options.delete_option' ), 'catalog': ( 'questions.add_catalog', 'questions.change_catalog', 'questions.delete_catalog' ), 'section': ( 'questions.add_section', 'questions.change_section', 'questions.delete_section' ), 'questionset': ( 'questions.add_questionset', 'questions.change_questionset', 'questions.delete_questionset' ), 'question': ( 'questions.add_question', 'questions.change_question', 'questions.delete_question' ), 'task': ( 'tasks.add_task', 'tasks.change_task', 'tasks.delete_task' ), 'view': ( 'views.add_view', 'views.change_view', 'views.delete_view' ) }
from django.utils.translation import gettext_lazy as _ VALUE_TYPE_TEXT = 'text' VALUE_TYPE_URL = 'url' VALUE_TYPE_INTEGER = 'integer' VALUE_TYPE_FLOAT = 'float' VALUE_TYPE_BOOLEAN = 'boolean' VALUE_TYPE_DATETIME = 'datetime' VALUE_TYPE_OPTIONS = 'option' VALUE_TYPE_FILE = 'file' VALUE_TYPE_CHOICES = ( (VALUE_TYPE_TEXT, _('Text')), (VALUE_TYPE_URL, _('URL')), (VALUE_TYPE_INTEGER, _('Integer')), (VALUE_TYPE_FLOAT, _('Float')), (VALUE_TYPE_BOOLEAN, _('Boolean')), (VALUE_TYPE_DATETIME, _('Datetime')), (VALUE_TYPE_OPTIONS, _('Option')), (VALUE_TYPE_FILE, _('File')) ) PERMISSIONS = { 'condition': ( 'conditions.add_condition', 'conditions.change_condition', 'conditions.delete_condition' ), 'attribute': ( 'domain.add_attribute', 'domain.change_attribute', 'domain.delete_attribute' ), 'optionset': ( 'options.add_optionset', 'options.change_optionset', 'options.delete_optionset' ), 'option': ( 'options.add_option', 'options.change_option', 'options.delete_option' ), 'catalog': ( 'questions.add_catalog', 'questions.change_catalog', 'questions.delete_catalog' ), 'section': ( 'questions.add_section', 'questions.change_section', 'questions.delete_section' ), 'questionset': ( 'questions.add_questionset', 'questions.change_questionset', 'questions.delete_questionset' ), 'question': ( 'questions.add_question', 'questions.change_question', 'questions.delete_question' ), 'task': ( 'tasks.add_task', 'tasks.change_task', 'tasks.delete_task' ), 'view': ( 'views.add_view', 'views.change_view', 'views.delete_view' ) }
none
1
1.861901
2
kornia/contrib/spatial_soft_argmax2d.py
timaebi/kornia
0
6629789
import torch import torch.nn as nn import torch.nn.functional as F from kornia.utils import create_meshgrid def spatial_soft_argmax2d( input: torch.Tensor, temperature: torch.Tensor = torch.tensor(1.0), normalized_coordinates: bool = True, eps: float = 1e-8) -> torch.Tensor: r"""Function that computes the Spatial Soft-Argmax 2D of a given input heatmap. Returns the index of the maximum 2d coordinates of the give map. The output order is x-coord and y-coord. Arguments: temperature (torch.Tensor): factor to apply to input. Default is 1. normalized_coordinates (bool): wether to return the coordinates normalized in the range of [-1, 1]. Otherwise, it will return the coordinates in the range of the input shape. Default is True. eps (float): small value to avoid zero division. Default is 1e-8. Shape: - Input: :math:`(B, N, H, W)` - Output: :math:`(B, N, 2)` Examples: >>> input = torch.tensor([[[ [0., 0., 0.], [0., 10., 0.], [0., 0., 0.]]]]) >>> coords = kornia.spatial_soft_argmax2d(input, False) tensor([[[1.0000, 1.0000]]]) """ if not torch.is_tensor(input): raise TypeError("Input input type is not a torch.Tensor. Got {}" .format(type(input))) if not len(input.shape) == 4: raise ValueError("Invalid input shape, we expect BxCxHxW. 
Got: {}" .format(input.shape)) # unpack shapes and create view from input tensor batch_size, channels, height, width = input.shape x: torch.Tensor = input.view(batch_size, channels, -1) # compute softmax along the feature map x_soft: torch.Tensor = F.softmax(x * temperature, dim=-1) # create coordinates grid grid: torch.Tensor = create_meshgrid( height, width, normalized_coordinates) grid = grid.to(input.device).to(input.dtype) pos_x: torch.Tensor = grid[..., 0].reshape(-1) pos_y: torch.Tensor = grid[..., 1].reshape(-1) # compute the expected coordinates expected_y: torch.Tensor = torch.sum(pos_y * x_soft, dim=-1, keepdim=True) expected_x: torch.Tensor = torch.sum(pos_x * x_soft, dim=-1, keepdim=True) output: torch.Tensor = torch.cat([expected_x, expected_y], dim=-1) return output.view(batch_size, channels, 2) # BxNx2 class SpatialSoftArgmax2d(nn.Module): r"""Function that computes the Spatial Soft-Argmax 2D of a given heatmap. See :class:`~kornia.contrib.spatial_soft_argmax2d` for details. """ def __init__(self, temperature: torch.Tensor = torch.tensor(1.0), normalized_coordinates: bool = True, eps: float = 1e-8) -> None: super(SpatialSoftArgmax2d, self).__init__() self.temperature: torch.Tensor = temperature self.normalized_coordinates: bool = normalized_coordinates self.eps: float = eps def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return spatial_soft_argmax2d(input, self.temperature, self.normalized_coordinates, self.eps)
import torch import torch.nn as nn import torch.nn.functional as F from kornia.utils import create_meshgrid def spatial_soft_argmax2d( input: torch.Tensor, temperature: torch.Tensor = torch.tensor(1.0), normalized_coordinates: bool = True, eps: float = 1e-8) -> torch.Tensor: r"""Function that computes the Spatial Soft-Argmax 2D of a given input heatmap. Returns the index of the maximum 2d coordinates of the give map. The output order is x-coord and y-coord. Arguments: temperature (torch.Tensor): factor to apply to input. Default is 1. normalized_coordinates (bool): wether to return the coordinates normalized in the range of [-1, 1]. Otherwise, it will return the coordinates in the range of the input shape. Default is True. eps (float): small value to avoid zero division. Default is 1e-8. Shape: - Input: :math:`(B, N, H, W)` - Output: :math:`(B, N, 2)` Examples: >>> input = torch.tensor([[[ [0., 0., 0.], [0., 10., 0.], [0., 0., 0.]]]]) >>> coords = kornia.spatial_soft_argmax2d(input, False) tensor([[[1.0000, 1.0000]]]) """ if not torch.is_tensor(input): raise TypeError("Input input type is not a torch.Tensor. Got {}" .format(type(input))) if not len(input.shape) == 4: raise ValueError("Invalid input shape, we expect BxCxHxW. 
Got: {}" .format(input.shape)) # unpack shapes and create view from input tensor batch_size, channels, height, width = input.shape x: torch.Tensor = input.view(batch_size, channels, -1) # compute softmax along the feature map x_soft: torch.Tensor = F.softmax(x * temperature, dim=-1) # create coordinates grid grid: torch.Tensor = create_meshgrid( height, width, normalized_coordinates) grid = grid.to(input.device).to(input.dtype) pos_x: torch.Tensor = grid[..., 0].reshape(-1) pos_y: torch.Tensor = grid[..., 1].reshape(-1) # compute the expected coordinates expected_y: torch.Tensor = torch.sum(pos_y * x_soft, dim=-1, keepdim=True) expected_x: torch.Tensor = torch.sum(pos_x * x_soft, dim=-1, keepdim=True) output: torch.Tensor = torch.cat([expected_x, expected_y], dim=-1) return output.view(batch_size, channels, 2) # BxNx2 class SpatialSoftArgmax2d(nn.Module): r"""Function that computes the Spatial Soft-Argmax 2D of a given heatmap. See :class:`~kornia.contrib.spatial_soft_argmax2d` for details. """ def __init__(self, temperature: torch.Tensor = torch.tensor(1.0), normalized_coordinates: bool = True, eps: float = 1e-8) -> None: super(SpatialSoftArgmax2d, self).__init__() self.temperature: torch.Tensor = temperature self.normalized_coordinates: bool = normalized_coordinates self.eps: float = eps def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return spatial_soft_argmax2d(input, self.temperature, self.normalized_coordinates, self.eps)
en
0.555851
Function that computes the Spatial Soft-Argmax 2D of a given input heatmap. Returns the index of the maximum 2d coordinates of the give map. The output order is x-coord and y-coord. Arguments: temperature (torch.Tensor): factor to apply to input. Default is 1. normalized_coordinates (bool): wether to return the coordinates normalized in the range of [-1, 1]. Otherwise, it will return the coordinates in the range of the input shape. Default is True. eps (float): small value to avoid zero division. Default is 1e-8. Shape: - Input: :math:`(B, N, H, W)` - Output: :math:`(B, N, 2)` Examples: >>> input = torch.tensor([[[ [0., 0., 0.], [0., 10., 0.], [0., 0., 0.]]]]) >>> coords = kornia.spatial_soft_argmax2d(input, False) tensor([[[1.0000, 1.0000]]]) # unpack shapes and create view from input tensor # compute softmax along the feature map # create coordinates grid # compute the expected coordinates # BxNx2 Function that computes the Spatial Soft-Argmax 2D of a given heatmap. See :class:`~kornia.contrib.spatial_soft_argmax2d` for details. # type: ignore
2.974088
3
pytealutils/transaction/__init__.py
nullun/pyteal-utils
20
6629790
<gh_stars>10-100 from .inner_transactions import axfer, pay from .transaction import assert_no_asset_close_to, assert_no_close_to, assert_no_rekey
from .inner_transactions import axfer, pay from .transaction import assert_no_asset_close_to, assert_no_close_to, assert_no_rekey
none
1
1.025323
1
instance/song.py
atipi/FlaskMongodbRESTApi_Demo
0
6629791
# -*- coding: utf-8 -*- __version__ = '0.1.0' __author__ = '<NAME>' import bson from flask import json, current_app app = current_app def create_from_file(file_path=None): """ Import song data from JSON file. :param file_path: full file path to JSON file :return: status: True if creation is done successfully """ if file_path is None: raise ValueError("Require JSON file path") status = False with open(file_path) as json_file: data = json.load(json_file) app.logger.debug('== JSON data: %s', data) for one_row in data: app.logger.debug('one_row: %s', one_row) Song().create(**one_row) status = True return status def convert_to_list(listItem=None): """ Convert data to list :param listItem: list of items for converting :return: list: list of valid dictionary data """ output = [] for s_dict in listItem: item_dict = get_dict_data(s_dict) output.append(item_dict) # app.logger.debug('== output: %s', output) return output def get_dict_data(data_dict=None): """ Replace ObjectId to string data type inside dictionary :param data_dict: dictionary data for converting :return: item_dict: formatted dictionary data """ item_dict = {} for key in data_dict: if key == '_id' or key == 'song_id': item_dict[key] = str(data_dict['_id']) else: item_dict[key] = data_dict[key] return item_dict class Song(object): """ Class object for song management """ _mongo = None def __init__(self): """ Initiate PyMongo object for the class """ self._mongo = app.config['mongodb'] def get_dbnames(self): """ Get all collection names inside the database :return: dbnames_list: list of collection names """ dbnames_list = self._mongo.db.list_collection_names() app.logger.debug('== dbnames_list: %s', dbnames_list) app.logger.debug('== dbnames_list: %s', len(dbnames_list)) return dbnames_list def drop_database(self): """ Drop database :return: """ self._mongo.db.command("dropDatabase") def create(self, **kwargs): """ Add row to songs collection. 
:param kwargs: dictionary of data :return: created_id: string of created object id """ app.logger.debug('CREATE args: %s', kwargs) created_id = self._mongo.db.songs.insert_one(kwargs).inserted_id created_id = str(created_id) app.logger.debug('created_id: %s', created_id) return created_id def get_doc_from_cursor(self, cursor=None): """ Get document from cursor object. :param cursor: :return: """ for document in cursor: # app.logger.debug('== document: %s', document) return document def list_all(self): """ List all rows in songs collection :return: list: list of dictionary data of a song """ songs = self._mongo.db.songs # app.logger.debug('songs: %s', songs) output = convert_to_list(songs.find()) return output def list(self, page_size=1, page_number=None): """ List data rows from songs collection or certain set of data with pagination. :param page_size: number of row per page :param page_number: page number for displaying :param skip_value: number of row for jumping :return: list: list of dictionary data of a song """ songs = None if page_number == 1: songs = self._mongo.db.songs.find().limit(int(page_size)) else: if page_number > 1: next_skip = (int(page_size) * int(page_number)) - 1 songs = self._mongo.db.songs.find().skip(next_skip).limit(int(page_size)) output = convert_to_list(songs) return output def search_by(self, key_search=None): """ Search songs by given artist name or title string. :param key_search: string for searching :return: list: list of dictionary data of a song """ app.logger.debug('key_search: %s', key_search) songs = self._mongo.db.songs.find({'$or': [ {'artist': {'$regex': key_search, '$options': 'i'}}, {'title': {'$regex': key_search, '$options': 'i'}}]}) output = convert_to_list(songs) return output def search_by_level(self, level_value=None): """ Search songs by level value. 
:param level_value: integer value of level for searching :return: list: list of dictionary data of a song """ songs = self._mongo.db.songs.find({"level": int(level_value)}) output = convert_to_list(songs) return output def get_average_level(self): """ Get average level of all songs :return: float value of average level value """ cursor = self._mongo.db.songs.aggregate([ { '$group': { '_id': "$id", "avg_level": {'$avg': '$level'} } } ]) # app.logger.debug('== get_average_level cursor: %s', cursor) avg_value = None for document in cursor: # app.logger.debug('== get_average_level document: %s', document) avg_value = document["avg_level"] return float(avg_value) def get_average_difficulty(self): """ Get average difficulty value of all songs :return: float value of average difficulty value """ cursor = self._mongo.db.songs.aggregate([ { '$group': { '_id': "$id", "avg_level": {'$avg': '$difficulty'} } } ]) # app.logger.debug('== get_average_difficulty cursor: %s', cursor) avg_value = None for document in cursor: # app.logger.debug('== get_average_difficulty document: %s', document) avg_value = document["avg_level"] return float(avg_value) def delete(self, song_id=None): """ Delete a row from songs collection. :param song_id: string of song object id :return: status: boolean value of operation status """ if song_id is None: raise ValueError("Missing song_id parameter") song_id = bson.ObjectId(str(song_id)) # db_response contains DeleteResult object db_response = self._mongo.db.songs.delete_one({'_id': song_id}) app.logger.debug('DELETE - db_response count: %s', db_response.deleted_count) if db_response.deleted_count == 1: return True else: return False
# -*- coding: utf-8 -*- __version__ = '0.1.0' __author__ = '<NAME>' import bson from flask import json, current_app app = current_app def create_from_file(file_path=None): """ Import song data from JSON file. :param file_path: full file path to JSON file :return: status: True if creation is done successfully """ if file_path is None: raise ValueError("Require JSON file path") status = False with open(file_path) as json_file: data = json.load(json_file) app.logger.debug('== JSON data: %s', data) for one_row in data: app.logger.debug('one_row: %s', one_row) Song().create(**one_row) status = True return status def convert_to_list(listItem=None): """ Convert data to list :param listItem: list of items for converting :return: list: list of valid dictionary data """ output = [] for s_dict in listItem: item_dict = get_dict_data(s_dict) output.append(item_dict) # app.logger.debug('== output: %s', output) return output def get_dict_data(data_dict=None): """ Replace ObjectId to string data type inside dictionary :param data_dict: dictionary data for converting :return: item_dict: formatted dictionary data """ item_dict = {} for key in data_dict: if key == '_id' or key == 'song_id': item_dict[key] = str(data_dict['_id']) else: item_dict[key] = data_dict[key] return item_dict class Song(object): """ Class object for song management """ _mongo = None def __init__(self): """ Initiate PyMongo object for the class """ self._mongo = app.config['mongodb'] def get_dbnames(self): """ Get all collection names inside the database :return: dbnames_list: list of collection names """ dbnames_list = self._mongo.db.list_collection_names() app.logger.debug('== dbnames_list: %s', dbnames_list) app.logger.debug('== dbnames_list: %s', len(dbnames_list)) return dbnames_list def drop_database(self): """ Drop database :return: """ self._mongo.db.command("dropDatabase") def create(self, **kwargs): """ Add row to songs collection. 
:param kwargs: dictionary of data :return: created_id: string of created object id """ app.logger.debug('CREATE args: %s', kwargs) created_id = self._mongo.db.songs.insert_one(kwargs).inserted_id created_id = str(created_id) app.logger.debug('created_id: %s', created_id) return created_id def get_doc_from_cursor(self, cursor=None): """ Get document from cursor object. :param cursor: :return: """ for document in cursor: # app.logger.debug('== document: %s', document) return document def list_all(self): """ List all rows in songs collection :return: list: list of dictionary data of a song """ songs = self._mongo.db.songs # app.logger.debug('songs: %s', songs) output = convert_to_list(songs.find()) return output def list(self, page_size=1, page_number=None): """ List data rows from songs collection or certain set of data with pagination. :param page_size: number of row per page :param page_number: page number for displaying :param skip_value: number of row for jumping :return: list: list of dictionary data of a song """ songs = None if page_number == 1: songs = self._mongo.db.songs.find().limit(int(page_size)) else: if page_number > 1: next_skip = (int(page_size) * int(page_number)) - 1 songs = self._mongo.db.songs.find().skip(next_skip).limit(int(page_size)) output = convert_to_list(songs) return output def search_by(self, key_search=None): """ Search songs by given artist name or title string. :param key_search: string for searching :return: list: list of dictionary data of a song """ app.logger.debug('key_search: %s', key_search) songs = self._mongo.db.songs.find({'$or': [ {'artist': {'$regex': key_search, '$options': 'i'}}, {'title': {'$regex': key_search, '$options': 'i'}}]}) output = convert_to_list(songs) return output def search_by_level(self, level_value=None): """ Search songs by level value. 
:param level_value: integer value of level for searching :return: list: list of dictionary data of a song """ songs = self._mongo.db.songs.find({"level": int(level_value)}) output = convert_to_list(songs) return output def get_average_level(self): """ Get average level of all songs :return: float value of average level value """ cursor = self._mongo.db.songs.aggregate([ { '$group': { '_id': "$id", "avg_level": {'$avg': '$level'} } } ]) # app.logger.debug('== get_average_level cursor: %s', cursor) avg_value = None for document in cursor: # app.logger.debug('== get_average_level document: %s', document) avg_value = document["avg_level"] return float(avg_value) def get_average_difficulty(self): """ Get average difficulty value of all songs :return: float value of average difficulty value """ cursor = self._mongo.db.songs.aggregate([ { '$group': { '_id': "$id", "avg_level": {'$avg': '$difficulty'} } } ]) # app.logger.debug('== get_average_difficulty cursor: %s', cursor) avg_value = None for document in cursor: # app.logger.debug('== get_average_difficulty document: %s', document) avg_value = document["avg_level"] return float(avg_value) def delete(self, song_id=None): """ Delete a row from songs collection. :param song_id: string of song object id :return: status: boolean value of operation status """ if song_id is None: raise ValueError("Missing song_id parameter") song_id = bson.ObjectId(str(song_id)) # db_response contains DeleteResult object db_response = self._mongo.db.songs.delete_one({'_id': song_id}) app.logger.debug('DELETE - db_response count: %s', db_response.deleted_count) if db_response.deleted_count == 1: return True else: return False
en
0.597857
# -*- coding: utf-8 -*- Import song data from JSON file. :param file_path: full file path to JSON file :return: status: True if creation is done successfully Convert data to list :param listItem: list of items for converting :return: list: list of valid dictionary data # app.logger.debug('== output: %s', output) Replace ObjectId to string data type inside dictionary :param data_dict: dictionary data for converting :return: item_dict: formatted dictionary data Class object for song management Initiate PyMongo object for the class Get all collection names inside the database :return: dbnames_list: list of collection names Drop database :return: Add row to songs collection. :param kwargs: dictionary of data :return: created_id: string of created object id Get document from cursor object. :param cursor: :return: # app.logger.debug('== document: %s', document) List all rows in songs collection :return: list: list of dictionary data of a song # app.logger.debug('songs: %s', songs) List data rows from songs collection or certain set of data with pagination. :param page_size: number of row per page :param page_number: page number for displaying :param skip_value: number of row for jumping :return: list: list of dictionary data of a song Search songs by given artist name or title string. :param key_search: string for searching :return: list: list of dictionary data of a song Search songs by level value. :param level_value: integer value of level for searching :return: list: list of dictionary data of a song Get average level of all songs :return: float value of average level value # app.logger.debug('== get_average_level cursor: %s', cursor) # app.logger.debug('== get_average_level document: %s', document) Get average difficulty value of all songs :return: float value of average difficulty value # app.logger.debug('== get_average_difficulty cursor: %s', cursor) # app.logger.debug('== get_average_difficulty document: %s', document) Delete a row from songs collection. 
:param song_id: string of song object id :return: status: boolean value of operation status # db_response contains DeleteResult object
3.155267
3
cirq-google/cirq_google/engine/abstract_local_program.py
kevinsung/Cirq
1
6629792
<filename>cirq-google/cirq_google/engine/abstract_local_program.py # Copyright 2021 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime from typing import Dict, List, Optional, Sequence, Set, TYPE_CHECKING, Union import cirq from cirq_google.engine.client import quantum from cirq_google.engine.abstract_program import AbstractProgram if TYPE_CHECKING: from cirq_google.engine.abstract_local_job import AbstractLocalJob from cirq_google.engine.abstract_local_engine import AbstractLocalEngine class AbstractLocalProgram(AbstractProgram): """A quantum program designed for local in-memory computation. This implements all the methods in `AbstractProgram` using in-memory objects. Labels, descriptions, and time are all stored using dictionaries. This is a partially implemented instance. Inheritors will still need to implement abstract methods. """ def __init__(self, circuits: List[cirq.Circuit], engine: 'AbstractLocalEngine'): if not circuits: raise ValueError('No circuits provided to program.') self._create_time = datetime.datetime.now() self._update_time = datetime.datetime.now() self._description = '' self._labels: Dict[str, str] = {} self._engine = engine self._jobs: Dict[str, 'AbstractLocalJob'] = {} self._circuits = circuits def engine(self) -> 'AbstractLocalEngine': """Returns the parent Engine object. Returns: The program's parent Engine. 
""" return self._engine def add_job(self, job_id: str, job: 'AbstractLocalJob') -> None: self._jobs[job_id] = job def get_job(self, job_id: str) -> 'AbstractLocalJob': """Returns an AbstractLocalJob for an existing Quantum Engine job. Args: job_id: Unique ID of the job within the parent program. Returns: A AbstractLocalJob for this program. Raises: KeyError: if job is not found. """ if job_id in self._jobs: return self._jobs[job_id] raise KeyError(f'job {job_id} not found') def list_jobs( self, created_before: Optional[Union[datetime.datetime, datetime.date]] = None, created_after: Optional[Union[datetime.datetime, datetime.date]] = None, has_labels: Optional[Dict[str, str]] = None, execution_states: Optional[Set[quantum.enums.ExecutionStatus.State]] = None, ) -> Sequence['AbstractLocalJob']: """Returns the list of jobs for this program. Args: created_after: retrieve jobs that were created after this date or time. created_before: retrieve jobs that were created before this date or time. has_labels: retrieve jobs that have labels on them specified by this dict. If the value is set to `*`, filters having the label regardless of the label value will be filtered. For example, to query programs that have the shape label and have the color label with value red can be queried using {'color': 'red', 'shape':'*'} execution_states: retrieve jobs that have an execution state that is contained in `execution_states`. See `quantum.enums.ExecutionStatus.State` enum for accepted values. 
""" job_list = [] for job in self._jobs.values(): if created_before and job.create_time() > created_before: continue if created_after and job.create_time() < created_after: continue if execution_states: if job.execution_status() not in execution_states: continue if has_labels: job_labels = job.labels() if not all( label in job_labels and job_labels[label] == has_labels[label] for label in has_labels ): continue job_list.append(job) return job_list def create_time(self) -> 'datetime.datetime': """Returns when the program was created.""" return self._create_time def update_time(self) -> 'datetime.datetime': """Returns when the program was last updated.""" return self._update_time def description(self) -> str: """Returns the description of the program.""" return self._description def set_description(self, description: str) -> 'AbstractProgram': """Sets the description of the program. Params: description: The new description for the program. Returns: This AbstractProgram. """ self._description = description return self def labels(self) -> Dict[str, str]: """Returns the labels of the program.""" return copy.copy(self._labels) def set_labels(self, labels: Dict[str, str]) -> 'AbstractProgram': """Sets (overwriting) the labels for a previously created quantum program. Params: labels: The entire set of new program labels. Returns: This AbstractProgram. """ self._labels = copy.copy(labels) return self def add_labels(self, labels: Dict[str, str]) -> 'AbstractProgram': """Adds new labels to a previously created quantum program. Params: labels: New labels to add to the existing program labels. Returns: This AbstractProgram. """ for key in labels: self._labels[key] = labels[key] return self def remove_labels(self, keys: List[str]) -> 'AbstractProgram': """Removes labels with given keys from the labels of a previously created quantum program. Params: label_keys: Label keys to remove from the existing program labels. Returns: This AbstractProgram. 
""" for key in keys: del self._labels[key] return self def get_circuit(self, program_num: Optional[int] = None) -> cirq.Circuit: """Returns the cirq Circuit for the program. This is only supported if the program was created with the V2 protos. Args: program_num: if this is a batch program, the index of the circuit in the batch. This argument is zero-indexed. Negative values indexing from the end of the list. Returns: The program's cirq Circuit. """ if program_num: return self._circuits[program_num] return self._circuits[0] def batch_size(self) -> int: """Returns the number of programs in a batch program. """ return len(self._circuits)
<filename>cirq-google/cirq_google/engine/abstract_local_program.py # Copyright 2021 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime from typing import Dict, List, Optional, Sequence, Set, TYPE_CHECKING, Union import cirq from cirq_google.engine.client import quantum from cirq_google.engine.abstract_program import AbstractProgram if TYPE_CHECKING: from cirq_google.engine.abstract_local_job import AbstractLocalJob from cirq_google.engine.abstract_local_engine import AbstractLocalEngine class AbstractLocalProgram(AbstractProgram): """A quantum program designed for local in-memory computation. This implements all the methods in `AbstractProgram` using in-memory objects. Labels, descriptions, and time are all stored using dictionaries. This is a partially implemented instance. Inheritors will still need to implement abstract methods. """ def __init__(self, circuits: List[cirq.Circuit], engine: 'AbstractLocalEngine'): if not circuits: raise ValueError('No circuits provided to program.') self._create_time = datetime.datetime.now() self._update_time = datetime.datetime.now() self._description = '' self._labels: Dict[str, str] = {} self._engine = engine self._jobs: Dict[str, 'AbstractLocalJob'] = {} self._circuits = circuits def engine(self) -> 'AbstractLocalEngine': """Returns the parent Engine object. Returns: The program's parent Engine. 
""" return self._engine def add_job(self, job_id: str, job: 'AbstractLocalJob') -> None: self._jobs[job_id] = job def get_job(self, job_id: str) -> 'AbstractLocalJob': """Returns an AbstractLocalJob for an existing Quantum Engine job. Args: job_id: Unique ID of the job within the parent program. Returns: A AbstractLocalJob for this program. Raises: KeyError: if job is not found. """ if job_id in self._jobs: return self._jobs[job_id] raise KeyError(f'job {job_id} not found') def list_jobs( self, created_before: Optional[Union[datetime.datetime, datetime.date]] = None, created_after: Optional[Union[datetime.datetime, datetime.date]] = None, has_labels: Optional[Dict[str, str]] = None, execution_states: Optional[Set[quantum.enums.ExecutionStatus.State]] = None, ) -> Sequence['AbstractLocalJob']: """Returns the list of jobs for this program. Args: created_after: retrieve jobs that were created after this date or time. created_before: retrieve jobs that were created before this date or time. has_labels: retrieve jobs that have labels on them specified by this dict. If the value is set to `*`, filters having the label regardless of the label value will be filtered. For example, to query programs that have the shape label and have the color label with value red can be queried using {'color': 'red', 'shape':'*'} execution_states: retrieve jobs that have an execution state that is contained in `execution_states`. See `quantum.enums.ExecutionStatus.State` enum for accepted values. 
""" job_list = [] for job in self._jobs.values(): if created_before and job.create_time() > created_before: continue if created_after and job.create_time() < created_after: continue if execution_states: if job.execution_status() not in execution_states: continue if has_labels: job_labels = job.labels() if not all( label in job_labels and job_labels[label] == has_labels[label] for label in has_labels ): continue job_list.append(job) return job_list def create_time(self) -> 'datetime.datetime': """Returns when the program was created.""" return self._create_time def update_time(self) -> 'datetime.datetime': """Returns when the program was last updated.""" return self._update_time def description(self) -> str: """Returns the description of the program.""" return self._description def set_description(self, description: str) -> 'AbstractProgram': """Sets the description of the program. Params: description: The new description for the program. Returns: This AbstractProgram. """ self._description = description return self def labels(self) -> Dict[str, str]: """Returns the labels of the program.""" return copy.copy(self._labels) def set_labels(self, labels: Dict[str, str]) -> 'AbstractProgram': """Sets (overwriting) the labels for a previously created quantum program. Params: labels: The entire set of new program labels. Returns: This AbstractProgram. """ self._labels = copy.copy(labels) return self def add_labels(self, labels: Dict[str, str]) -> 'AbstractProgram': """Adds new labels to a previously created quantum program. Params: labels: New labels to add to the existing program labels. Returns: This AbstractProgram. """ for key in labels: self._labels[key] = labels[key] return self def remove_labels(self, keys: List[str]) -> 'AbstractProgram': """Removes labels with given keys from the labels of a previously created quantum program. Params: label_keys: Label keys to remove from the existing program labels. Returns: This AbstractProgram. 
""" for key in keys: del self._labels[key] return self def get_circuit(self, program_num: Optional[int] = None) -> cirq.Circuit: """Returns the cirq Circuit for the program. This is only supported if the program was created with the V2 protos. Args: program_num: if this is a batch program, the index of the circuit in the batch. This argument is zero-indexed. Negative values indexing from the end of the list. Returns: The program's cirq Circuit. """ if program_num: return self._circuits[program_num] return self._circuits[0] def batch_size(self) -> int: """Returns the number of programs in a batch program. """ return len(self._circuits)
en
0.842021
# Copyright 2021 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A quantum program designed for local in-memory computation. This implements all the methods in `AbstractProgram` using in-memory objects. Labels, descriptions, and time are all stored using dictionaries. This is a partially implemented instance. Inheritors will still need to implement abstract methods. Returns the parent Engine object. Returns: The program's parent Engine. Returns an AbstractLocalJob for an existing Quantum Engine job. Args: job_id: Unique ID of the job within the parent program. Returns: A AbstractLocalJob for this program. Raises: KeyError: if job is not found. Returns the list of jobs for this program. Args: created_after: retrieve jobs that were created after this date or time. created_before: retrieve jobs that were created before this date or time. has_labels: retrieve jobs that have labels on them specified by this dict. If the value is set to `*`, filters having the label regardless of the label value will be filtered. For example, to query programs that have the shape label and have the color label with value red can be queried using {'color': 'red', 'shape':'*'} execution_states: retrieve jobs that have an execution state that is contained in `execution_states`. See `quantum.enums.ExecutionStatus.State` enum for accepted values. Returns when the program was created. Returns when the program was last updated. Returns the description of the program. 
Sets the description of the program. Params: description: The new description for the program. Returns: This AbstractProgram. Returns the labels of the program. Sets (overwriting) the labels for a previously created quantum program. Params: labels: The entire set of new program labels. Returns: This AbstractProgram. Adds new labels to a previously created quantum program. Params: labels: New labels to add to the existing program labels. Returns: This AbstractProgram. Removes labels with given keys from the labels of a previously created quantum program. Params: label_keys: Label keys to remove from the existing program labels. Returns: This AbstractProgram. Returns the cirq Circuit for the program. This is only supported if the program was created with the V2 protos. Args: program_num: if this is a batch program, the index of the circuit in the batch. This argument is zero-indexed. Negative values indexing from the end of the list. Returns: The program's cirq Circuit. Returns the number of programs in a batch program.
2.535386
3
src/sage/schemes/elliptic_curves/ell_tate_curve.py
bopopescu/sage-5
2
6629793
r""" Tate's parametrisation of `p`-adic curves with multiplicative reduction Let `E` be an elliptic curve defined over the `p`-adic numbers `\QQ_p`. Suppose that `E` has multiplicative reduction, i.e. that the `j`-invariant of `E` has negative valuation, say `n`. Then there exists a parameter `q` in `\ZZ_p` of valuation `n` such that the points of `E` defined over the algebraic closure `\bar{\QQ}_p` are in bijection with `\bar{\QQ}_p^{\times}\,/\, q^{\ZZ}`. More precisely there exists the series `s_4(q)` and `s_6(q)` such that the `y^2+x y = x^3 + s_4(q) x+s_6(q)` curve is isomorphic to `E` over `\bar{\QQ}_p` (or over `\QQ_p` if the reduction is *split* multiplicative). There is `p`-adic analytic map from `\bar{\QQ}^{\times}_p` to this curve with kernel `q^{\ZZ}`. Points of good reduction correspond to points of valuation `0` in `\bar{\QQ}^{\times}_p`. See chapter V of [Sil2] for more details. REFERENCES : - [Sil2] Silverman Joseph, Advanced Topics in the Arithmetic of Elliptic Curves, GTM 151, Springer 1994. AUTHORS: - <NAME> (23/05/2007): first version - <NAME> (2007-05-29): added some examples; editing. - <NAME> (04/09): reformatted docstrings. """ ###################################################################### # Copyright (C) 2007 <NAME> # # Distributed under the terms of the GNU General Public License (GPL) # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # The full text of the GPL is available at: # # http://www.gnu.org/licenses/ ###################################################################### from sage.rings.integer_ring import ZZ from sage.rings.padics.factory import Qp from sage.structure.sage_object import SageObject from sage.rings.arith import LCM from sage.modular.modform.constructor import EisensteinForms, CuspForms from sage.schemes.elliptic_curves.constructor import EllipticCurve from sage.misc.functional import log from sage.misc.all import denominator, prod import sage.matrix.all as matrix class TateCurve(SageObject): r""" Tate's `p`-adic uniformisation of an elliptic curve with multiplicative reduction. .. note:: Some of the methods of this Tate curve only work when the reduction is split multiplicative over `\QQ_p`. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(5); eq 5-adic Tate curve associated to the Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field sage: eq == loads(dumps(eq)) True REFERENCES : - [Sil2] <NAME>, Advanced Topics in the Arithmetic of Elliptic Curves, GTM 151, Springer 1994. """ def __init__(self,E,p): r""" INPUT: - ``E`` - an elliptic curve over the rational numbers - ``p`` - a prime where `E` has multiplicative reduction, i.e., such that `j(E)` has negative valuation. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(2); eq 2-adic Tate curve associated to the Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field """ if not p.is_prime(): raise ValueError, "p (=%s) must be a prime"%p if E.j_invariant().valuation(p) >= 0: raise ValueError, "The elliptic curve must have multiplicative reduction at %s"%p self._p = ZZ(p) self._E = E self._q = self.parameter() def __cmp__(self, other): r""" Compare self and other. 
TESTS:: sage: E = EllipticCurve('35a') sage: eq5 = E.tate_curve(5) sage: eq7 = E.tate_curve(7) sage: eq7 == eq7 True sage: eq7 == eq5 False """ c = cmp(type(self), type(other)) if c: return c return cmp((self._E, self._p), (other._E, other._p)) def _repr_(self): r""" Return print representation. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(2) sage: eq._repr_() '2-adic Tate curve associated to the Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field' """ s = "%s-adic Tate curve associated to the %s"%(self._p, self._E) return s def original_curve(self): r""" Returns the elliptic curve the Tate curve was constructed from. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.original_curve() Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field """ return self._E def prime(self): r""" Returns the residual characteristic `p`. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.original_curve() Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field sage: eq.prime() 5 """ return self._p def parameter(self,prec=20): r""" Returns the Tate parameter `q` such that the curve is isomorphic over the algebraic closure of `\QQ_p` to the curve `\QQ_p^{\times}/q^{\ZZ}`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.parameter(prec=5) 3*5^3 + 3*5^4 + 2*5^5 + 2*5^6 + 3*5^7 + O(5^8) """ try: qE = self._q if qE.absolute_precision() >= prec: return qE except AttributeError: pass jE = self._E.j_invariant() E4 = EisensteinForms(weight=4).basis()[0] Delta = CuspForms(weight=12).basis()[0] j = (E4.q_expansion(prec+3))**3/Delta.q_expansion(prec+3) jinv = (1/j).power_series() q_in_terms_of_jinv = jinv.reversion() R = Qp(self._p,prec=prec) qE = q_in_terms_of_jinv(R(1/self._E.j_invariant())) self._q = qE return qE __sk = lambda e,k,prec: sum( [n**k*e._q**n/(1-e._q**n) for n in range(1,prec+1)] ) __delta = lambda e,prec: e._q* prod([(1-e._q**n)**24 for n in range(1,prec+1) ] ) def curve(self,prec=20): r""" Returns the `p`-adic elliptic curve of the form `y^2+x y = x^3 + s_4 x+s_6`. This curve with split multiplicative reduction is isomorphic to the given curve over the algebraic closure of `\QQ_p`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.curve(prec=5) Elliptic Curve defined by y^2 + (1+O(5^5))*x*y = x^3 + (2*5^4+5^5+2*5^6+5^7+3*5^8+O(5^9))*x + (2*5^3+5^4+2*5^5+5^7+O(5^8)) over 5-adic Field with capped relative precision 5 """ try: Eq = self.__curve if Eq.a6().absolute_precision() >= prec: return Eq except AttributeError: pass qE = self.parameter(prec=prec) n = qE.valuation() precp = (prec/n).floor() + 2; R = qE.parent() tate_a4 = -5 * self.__sk(3,precp) tate_a6 = (tate_a4 - 7 * self.__sk(5,precp) )/12 Eq = EllipticCurve([R(1),R(0),R(0),tate_a4,tate_a6]) self.__curve = Eq return Eq def _Csquare(self,prec=20): r""" Returns the square of the constant `C` such that the canonical Neron differential `\omega` and the canonical differential `\frac{du}{u}` on `\QQ^{\times}/q^{\ZZ}` are linked by `\omega = C \frac{du}{u}`. This constant is only a square in `\QQ_p` if the curve has split multiplicative reduction. 
INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq._Csquare(prec=5) 4 + 2*5^2 + 2*5^4 + O(5^5) """ try: Csq = self.__Csquare if Csq.absolute_precision() >= prec: return Csq except AttributeError: pass Eq = self.curve(prec=prec) tateCsquare = Eq.c6() * self._E.c4()/Eq.c4()/self._E.c6() self.__Csquare = tateCsquare return tateCsquare def E2(self,prec=20): r""" Returns the value of the `p`-adic Eisenstein series of weight 2 evaluated on the elliptic curve having split multiplicative reduction. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.E2(prec=10) 4 + 2*5^2 + 2*5^3 + 5^4 + 2*5^5 + 5^7 + 5^8 + 2*5^9 + O(5^10) sage: T = EllipticCurve('14').tate_curve(7) sage: T.E2(30) 2 + 4*7 + 7^2 + 3*7^3 + 6*7^4 + 5*7^5 + 2*7^6 + 7^7 + 5*7^8 + 6*7^9 + 5*7^10 + 2*7^11 + 6*7^12 + 4*7^13 + 3*7^15 + 5*7^16 + 4*7^17 + 4*7^18 + 2*7^20 + 7^21 + 5*7^22 + 4*7^23 + 4*7^24 + 3*7^25 + 6*7^26 + 3*7^27 + 6*7^28 + O(7^30) """ p = self._p Csq = self._Csquare(prec=prec) qE = self._q n = qE.valuation() R = Qp(p,prec) e2 = Csq*(1 - 24 * sum( [ qE**i/(1-qE**i)**2 for i in range(1,(prec/n).floor() + 5) ])) return R(e2) def is_split(self): r""" Returns True if the given elliptic curve has split multiplicative reduction. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.is_split() True sage: eq = EllipticCurve('37a1').tate_curve(37) sage: eq.is_split() False """ return self._Csquare().is_square() def parametrisation_onto_tate_curve(self,u,prec=20): r""" Given an element `u` in `\QQ_p^{\times}`, this computes its image on the Tate curve under the `p`-adic uniformisation of `E`. INPUT: - ``u`` - a non-zero `p`-adic number. - ``prec`` - the `p`-adic precision, default is 20. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.parametrisation_onto_tate_curve(1+5+5^2+O(5^10)) (5^-2 + 4*5^-1 + 1 + 2*5 + 3*5^2 + 2*5^5 + 3*5^6 + O(5^7) : 4*5^-3 + 2*5^-1 + 4 + 2*5 + 3*5^4 + 2*5^5 + O(5^6) : 1 + O(5^20)) """ if u == 1: return self.curve(prec=prec)(0) q = self._q un = u * q**(-(u.valuation()/q.valuation()).floor()) precn = (prec/q.valuation()).floor() + 4 # formulas in Silverman II (Advanced Topics in the Arithmetic of Elliptic curves, p. 425) xx = un/(1-un)**2 + sum( [q**n*un/(1-q**n*un)**2 + q**n/un/(1-q**n/un)**2-2*q**n/(1-q**n)**2 for n in range(1,precn) ]) yy = un**2/(1-un)**3 + sum( [q**(2*n)*un**2/(1-q**n*un)**3 - q**n/un/(1-q**n/un)**3+q**n/(1-q**n)**2 for n in range(1,precn) ]) return self.curve(prec=prec)( [xx,yy] ) # From here on all function need that the curve has split multiplicative reduction. def L_invariant(self,prec=20): r""" Returns the *mysterious* `\mathcal{L}`-invariant associated to an elliptic curve with split multiplicative reduction. One instance where this constant appears is in the exceptional case of the `p`-adic Birch and Swinnerton-Dyer conjecture as formulated in [MTT]. See [Col] for a detailed discussion. INPUT: - ``prec`` - the `p`-adic precision, default is 20. REFERENCES: - [MTT] <NAME>, <NAME>, and <NAME>, On `p`-adic analogues of the conjectures of Birch and Swinnerton-Dyer, Inventiones mathematicae 84, (1986), 1-48. - [Col] <NAME>, Invariant `\mathcal{L}` et derivees de valeurs propores de Frobenius, preprint, 2004. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.L_invariant(prec=10) 5^3 + 4*5^4 + 2*5^5 + 2*5^6 + 2*5^7 + 3*5^8 + 5^9 + O(5^10) """ if not self.is_split(): raise RuntimeError, "The curve must have split multiplicative reduction" qE = self.parameter(prec=prec) n = qE.valuation() u = qE/self._p**n # the p-adic logarithm of Iwasawa normalised by log(p) = 0 return log(u)/n def _isomorphism(self,prec=20): r""" Returns the isomorphism between ``self.curve()`` and the given curve in the form of a list ``[u,r,s,t]`` of `p`-adic numbers. For this to exist the given curve has to have split multiplicative reduction over `\QQ_p`. More precisely, if `E` has coordinates `x` and `y` and the Tate curve has coordinates `X`, `Y` with `Y^2 + XY = X^3 + s_4 X +s_6` then `X = u^2 x +r` and `Y = u^3 y +s u^2 x +t`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq._isomorphism(prec=5) [2 + 3*5^2 + 2*5^3 + 4*5^4 + O(5^5), 4 + 3*5 + 4*5^2 + 2*5^3 + O(5^5), 3 + 2*5 + 5^2 + 5^3 + 2*5^4 + O(5^5), 2 + 5 + 3*5^2 + 5^3 + 5^4 + O(5^5)] """ if not self.is_split(): raise RuntimeError, "The curve must have split multiplicative reduction" Csq = self._Csquare(prec=prec+4) C = Csq.sqrt() R = Qp(self._p,prec) C = R(C) s = (C * R(self._E.a1()) -R(1))/R(2) r = (C**2*R(self._E.a2()) +s +s**2)/R(3) t = (C**3*R(self._E.a3()) - r)/R(2) return [C,r,s,t] def _inverse_isomorphism(self,prec=20): r""" Returns the isomorphism between the given curve and ``self.curve()`` in the form of a list ``[u,r,s,t]`` of `p`-adic numbers. For this to exist the given curve has to have split multiplicative reduction over `\QQ_p`. More precisely, if `E` has coordinates `x` and `y` and the Tate curve has coordinates `X`, `Y` with `Y^2 + XY = X^3 + s_4 X +s_6` then `x = u^2 X +r` and `y = u^3 Y +s u^2 X +t`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq._inverse_isomorphism(prec=5) [3 + 2*5 + 3*5^3 + O(5^5), 4 + 2*5 + 4*5^3 + 3*5^4 + O(5^5), 1 + 5 + 4*5^3 + 2*5^4 + O(5^5), 5 + 2*5^2 + 3*5^4 + O(5^5)] """ if not self.is_split(): raise RuntimeError, "The curve must have split multiplicative reduction" vec = self._isomorphism(prec=prec) return [1/vec[0],-vec[1]/vec[0]**2,-vec[2]/vec[0],(vec[1]*vec[2]-vec[3])/vec[0]**3] def lift(self,P, prec = 20): r""" Given a point `P` in the formal group of the elliptic curve `E` with split multiplicative reduction, this produces an element `u` in `\QQ_p^{\times}` mapped to the point `P` by the Tate parametrisation. The algorithm return the unique such element in `1+p\ZZ_p`. INPUT: - ``P`` - a point on the elliptic curve. - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(5) sage: P = e([-6,10]) sage: l = eq.lift(12*P, prec=10); l 1 + 4*5 + 5^3 + 5^4 + 4*5^5 + 5^6 + 5^7 + 4*5^8 + 5^9 + O(5^10) Now we map the lift l back and check that it is indeed right.:: sage: eq.parametrisation_onto_original_curve(l) (4*5^-2 + 2*5^-1 + 4*5 + 3*5^3 + 5^4 + 2*5^5 + 4*5^6 + O(5^7) : 2*5^-3 + 5^-1 + 4 + 4*5 + 5^2 + 3*5^3 + 4*5^4 + O(5^6) : 1 + O(5^20)) sage: e5 = e.change_ring(Qp(5,9)) sage: e5(12*P) (4*5^-2 + 2*5^-1 + 4*5 + 3*5^3 + 5^4 + 2*5^5 + 4*5^6 + O(5^7) : 2*5^-3 + 5^-1 + 4 + 4*5 + 5^2 + 3*5^3 + 4*5^4 + O(5^6) : 1 + O(5^9)) """ p = self._p R = Qp(self._p,prec) if not self._E == P.curve(): raise ValueError , "The point must lie on the original curve." if not self.is_split(): raise ValueError, "The curve must have split multiplicative reduction." if P.is_zero(): return R(1) if P[0].valuation(p) >= 0: raise ValueError , "The point must lie in the formal group." 
Eq = self.curve(prec=prec) isom = self._isomorphism(prec=prec) C = isom[0] r = isom[1] s = isom[2] t = isom[3] xx = r + C**2 * P[0] yy = t + s * C**2 * P[0] + C**3 * P[1] try: Pq = Eq([xx,yy]) except StandardError: raise RuntimeError, "Bug : Point %s does not lie on the curve "%[xx,yy] tt = -xx/yy eqhat = Eq.formal() eqlog = eqhat.log(prec + 3) z = eqlog(tt) u = ZZ(1) fac = ZZ(1) for i in range(1,2*prec+1): fac = fac * i u = u + z**i/fac return u def parametrisation_onto_original_curve(self,u,prec=20): r""" Given an element `u` in `\QQ_p^{\times}`, this computes its image on the original curve under the `p`-adic uniformisation of `E`. INPUT: - ``u`` - a non-zero `p`-adic number. - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.parametrisation_onto_original_curve(1+5+5^2+O(5^10)) (4*5^-2 + 4*5^-1 + 4 + 2*5^3 + 3*5^4 + 2*5^6 + O(5^7) : 3*5^-3 + 5^-2 + 4*5^-1 + 1 + 4*5 + 5^2 + 3*5^5 + O(5^6) : 1 + O(5^20)) Here is how one gets a 4-torsion point on `E` over `\QQ_5`:: sage: R = Qp(5,10) sage: i = R(-1).sqrt() sage: T = eq.parametrisation_onto_original_curve(i); T (2 + 3*5 + 4*5^2 + 2*5^3 + 5^4 + 4*5^5 + 2*5^7 + 5^8 + 5^9 + O(5^10) : 3*5 + 5^2 + 5^4 + 3*5^5 + 3*5^7 + 2*5^8 + 4*5^9 + O(5^10) : 1 + O(5^20)) sage: 4*T (0 : 1 + O(5^20) : 0) """ if not self.is_split(): raise ValueError, "The curve must have split multiplicative reduction." P = self.parametrisation_onto_tate_curve(u,prec=20) isom = self._inverse_isomorphism(prec=prec) C = isom[0] r = isom[1] s = isom[2] t = isom[3] xx = r + C**2 * P[0] yy = t + s * C**2 * P[0] + C**3 * P[1] R = Qp(self._p,prec) E_over_Qp = self._E.base_extend(R) return E_over_Qp([xx,yy]) __padic_sigma_square = lambda e,u,prec: (u-1)**2/u* prod([((1-e._q**n*u)*(1-e._q**n/u)/(1-e._q**n)**2)**2 for n in range(1,prec+1)]) # the following functions are rather functions of the global curve than the local curve # we use the same names as for elliptic curves over rationals. 
def padic_height(self,prec=20): r""" Returns the canonical `p`-adic height function on the original curve. INPUT: - ``prec`` - the `p`-adic precision, default is 20. OUTPUT: - A function that can be evaluated on rational points of `E`. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(5) sage: h = eq.padic_height(prec=10) sage: P=e.gens()[0] sage: h(P) 2*5^-1 + 1 + 2*5 + 2*5^2 + 3*5^3 + 3*5^6 + 5^7 + O(5^8) Check that it is a quadratic function:: sage: h(3*P)-3^2*h(P) O(5^8) """ if not self.is_split(): raise NotImplementedError, "The curve must have split multiplicative reduction" p = self._p # we will have to do it properly with David Harvey's _multiply_point(E, R, Q) n = LCM(self._E.tamagawa_numbers()) * (p-1) # this function is a closure, I don't see how to doctest it (PZ) def _height(P,check=True): if check: assert P.curve() == self._E, "the point P must lie on the curve from which the height function was created" Q = n * P cQ = denominator(Q[0]) uQ = self.lift(Q,prec = prec) si = self.__padic_sigma_square(uQ, prec=prec) nn = self._q.valuation() qEu = self._q/p**nn return -(log(si*self._Csquare()/cQ) + log(uQ)**2/log(qEu)) / n**2 return _height def padic_regulator(self,prec=20): r""" Computes the canonical `p`-adic regulator on the extended Mordell-Weil group as in [MTT] (with the correction of [Wer] and sign convention in [SW].) The `p`-adic Birch and Swinnerton-Dyer conjecture predicts that this value appears in the formula for the leading term of the `p`-adic L-function. INPUT: - ``prec`` - the `p`-adic precision, default is 20. REFERENCES: - [MTT] <NAME>, <NAME>, and <NAME>, On `p`-adic analogues of the conjectures of Birch and Swinnerton-Dyer, Inventiones mathematicae 84, (1986), 1-48. - [Wer] <NAME>, Local heights on abelian varieties and rigid analytic unifomization, Doc. Math. 3 (1998), 301-319. - [SW] <NAME> and <NAME>, Computations About Tate-Shafarevich Groups using Iwasawa theory, preprint 2009. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.padic_regulator() 2*5^-1 + 1 + 2*5 + 2*5^2 + 3*5^3 + 3*5^6 + 5^7 + 3*5^9 + 3*5^10 + 3*5^12 + 4*5^13 + 3*5^15 + 2*5^16 + 3*5^18 + 4*5^19 + O(5^20) """ prec = prec + 4 K = Qp(self._p, prec=prec) rank = self._E.rank() if rank == 0: return K(1) if not self.is_split(): raise NotImplementedError, "The p-adic regulator is not implemented for non-split multiplicative reduction." basis = self._E.gens() M = matrix.matrix(K, rank, rank, 0) height = self.padic_height(prec= prec) point_height = [height(P) for P in basis] for i in range(rank): for j in range(i+1, rank): M[i, j] = M[j, i] = (- point_height[i] - point_height[j] + height(basis[i] + basis[j]))/2 for i in range(rank): M[i,i] = point_height[i] return M.determinant()
r""" Tate's parametrisation of `p`-adic curves with multiplicative reduction Let `E` be an elliptic curve defined over the `p`-adic numbers `\QQ_p`. Suppose that `E` has multiplicative reduction, i.e. that the `j`-invariant of `E` has negative valuation, say `n`. Then there exists a parameter `q` in `\ZZ_p` of valuation `n` such that the points of `E` defined over the algebraic closure `\bar{\QQ}_p` are in bijection with `\bar{\QQ}_p^{\times}\,/\, q^{\ZZ}`. More precisely there exists the series `s_4(q)` and `s_6(q)` such that the `y^2+x y = x^3 + s_4(q) x+s_6(q)` curve is isomorphic to `E` over `\bar{\QQ}_p` (or over `\QQ_p` if the reduction is *split* multiplicative). There is `p`-adic analytic map from `\bar{\QQ}^{\times}_p` to this curve with kernel `q^{\ZZ}`. Points of good reduction correspond to points of valuation `0` in `\bar{\QQ}^{\times}_p`. See chapter V of [Sil2] for more details. REFERENCES : - [Sil2] Silverman Joseph, Advanced Topics in the Arithmetic of Elliptic Curves, GTM 151, Springer 1994. AUTHORS: - <NAME> (23/05/2007): first version - <NAME> (2007-05-29): added some examples; editing. - <NAME> (04/09): reformatted docstrings. """ ###################################################################### # Copyright (C) 2007 <NAME> # # Distributed under the terms of the GNU General Public License (GPL) # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # The full text of the GPL is available at: # # http://www.gnu.org/licenses/ ###################################################################### from sage.rings.integer_ring import ZZ from sage.rings.padics.factory import Qp from sage.structure.sage_object import SageObject from sage.rings.arith import LCM from sage.modular.modform.constructor import EisensteinForms, CuspForms from sage.schemes.elliptic_curves.constructor import EllipticCurve from sage.misc.functional import log from sage.misc.all import denominator, prod import sage.matrix.all as matrix class TateCurve(SageObject): r""" Tate's `p`-adic uniformisation of an elliptic curve with multiplicative reduction. .. note:: Some of the methods of this Tate curve only work when the reduction is split multiplicative over `\QQ_p`. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(5); eq 5-adic Tate curve associated to the Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field sage: eq == loads(dumps(eq)) True REFERENCES : - [Sil2] <NAME>, Advanced Topics in the Arithmetic of Elliptic Curves, GTM 151, Springer 1994. """ def __init__(self,E,p): r""" INPUT: - ``E`` - an elliptic curve over the rational numbers - ``p`` - a prime where `E` has multiplicative reduction, i.e., such that `j(E)` has negative valuation. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(2); eq 2-adic Tate curve associated to the Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field """ if not p.is_prime(): raise ValueError, "p (=%s) must be a prime"%p if E.j_invariant().valuation(p) >= 0: raise ValueError, "The elliptic curve must have multiplicative reduction at %s"%p self._p = ZZ(p) self._E = E self._q = self.parameter() def __cmp__(self, other): r""" Compare self and other. 
TESTS:: sage: E = EllipticCurve('35a') sage: eq5 = E.tate_curve(5) sage: eq7 = E.tate_curve(7) sage: eq7 == eq7 True sage: eq7 == eq5 False """ c = cmp(type(self), type(other)) if c: return c return cmp((self._E, self._p), (other._E, other._p)) def _repr_(self): r""" Return print representation. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(2) sage: eq._repr_() '2-adic Tate curve associated to the Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field' """ s = "%s-adic Tate curve associated to the %s"%(self._p, self._E) return s def original_curve(self): r""" Returns the elliptic curve the Tate curve was constructed from. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.original_curve() Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field """ return self._E def prime(self): r""" Returns the residual characteristic `p`. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.original_curve() Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field sage: eq.prime() 5 """ return self._p def parameter(self,prec=20): r""" Returns the Tate parameter `q` such that the curve is isomorphic over the algebraic closure of `\QQ_p` to the curve `\QQ_p^{\times}/q^{\ZZ}`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.parameter(prec=5) 3*5^3 + 3*5^4 + 2*5^5 + 2*5^6 + 3*5^7 + O(5^8) """ try: qE = self._q if qE.absolute_precision() >= prec: return qE except AttributeError: pass jE = self._E.j_invariant() E4 = EisensteinForms(weight=4).basis()[0] Delta = CuspForms(weight=12).basis()[0] j = (E4.q_expansion(prec+3))**3/Delta.q_expansion(prec+3) jinv = (1/j).power_series() q_in_terms_of_jinv = jinv.reversion() R = Qp(self._p,prec=prec) qE = q_in_terms_of_jinv(R(1/self._E.j_invariant())) self._q = qE return qE __sk = lambda e,k,prec: sum( [n**k*e._q**n/(1-e._q**n) for n in range(1,prec+1)] ) __delta = lambda e,prec: e._q* prod([(1-e._q**n)**24 for n in range(1,prec+1) ] ) def curve(self,prec=20): r""" Returns the `p`-adic elliptic curve of the form `y^2+x y = x^3 + s_4 x+s_6`. This curve with split multiplicative reduction is isomorphic to the given curve over the algebraic closure of `\QQ_p`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.curve(prec=5) Elliptic Curve defined by y^2 + (1+O(5^5))*x*y = x^3 + (2*5^4+5^5+2*5^6+5^7+3*5^8+O(5^9))*x + (2*5^3+5^4+2*5^5+5^7+O(5^8)) over 5-adic Field with capped relative precision 5 """ try: Eq = self.__curve if Eq.a6().absolute_precision() >= prec: return Eq except AttributeError: pass qE = self.parameter(prec=prec) n = qE.valuation() precp = (prec/n).floor() + 2; R = qE.parent() tate_a4 = -5 * self.__sk(3,precp) tate_a6 = (tate_a4 - 7 * self.__sk(5,precp) )/12 Eq = EllipticCurve([R(1),R(0),R(0),tate_a4,tate_a6]) self.__curve = Eq return Eq def _Csquare(self,prec=20): r""" Returns the square of the constant `C` such that the canonical Neron differential `\omega` and the canonical differential `\frac{du}{u}` on `\QQ^{\times}/q^{\ZZ}` are linked by `\omega = C \frac{du}{u}`. This constant is only a square in `\QQ_p` if the curve has split multiplicative reduction. 
INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq._Csquare(prec=5) 4 + 2*5^2 + 2*5^4 + O(5^5) """ try: Csq = self.__Csquare if Csq.absolute_precision() >= prec: return Csq except AttributeError: pass Eq = self.curve(prec=prec) tateCsquare = Eq.c6() * self._E.c4()/Eq.c4()/self._E.c6() self.__Csquare = tateCsquare return tateCsquare def E2(self,prec=20): r""" Returns the value of the `p`-adic Eisenstein series of weight 2 evaluated on the elliptic curve having split multiplicative reduction. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.E2(prec=10) 4 + 2*5^2 + 2*5^3 + 5^4 + 2*5^5 + 5^7 + 5^8 + 2*5^9 + O(5^10) sage: T = EllipticCurve('14').tate_curve(7) sage: T.E2(30) 2 + 4*7 + 7^2 + 3*7^3 + 6*7^4 + 5*7^5 + 2*7^6 + 7^7 + 5*7^8 + 6*7^9 + 5*7^10 + 2*7^11 + 6*7^12 + 4*7^13 + 3*7^15 + 5*7^16 + 4*7^17 + 4*7^18 + 2*7^20 + 7^21 + 5*7^22 + 4*7^23 + 4*7^24 + 3*7^25 + 6*7^26 + 3*7^27 + 6*7^28 + O(7^30) """ p = self._p Csq = self._Csquare(prec=prec) qE = self._q n = qE.valuation() R = Qp(p,prec) e2 = Csq*(1 - 24 * sum( [ qE**i/(1-qE**i)**2 for i in range(1,(prec/n).floor() + 5) ])) return R(e2) def is_split(self): r""" Returns True if the given elliptic curve has split multiplicative reduction. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.is_split() True sage: eq = EllipticCurve('37a1').tate_curve(37) sage: eq.is_split() False """ return self._Csquare().is_square() def parametrisation_onto_tate_curve(self,u,prec=20): r""" Given an element `u` in `\QQ_p^{\times}`, this computes its image on the Tate curve under the `p`-adic uniformisation of `E`. INPUT: - ``u`` - a non-zero `p`-adic number. - ``prec`` - the `p`-adic precision, default is 20. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.parametrisation_onto_tate_curve(1+5+5^2+O(5^10)) (5^-2 + 4*5^-1 + 1 + 2*5 + 3*5^2 + 2*5^5 + 3*5^6 + O(5^7) : 4*5^-3 + 2*5^-1 + 4 + 2*5 + 3*5^4 + 2*5^5 + O(5^6) : 1 + O(5^20)) """ if u == 1: return self.curve(prec=prec)(0) q = self._q un = u * q**(-(u.valuation()/q.valuation()).floor()) precn = (prec/q.valuation()).floor() + 4 # formulas in Silverman II (Advanced Topics in the Arithmetic of Elliptic curves, p. 425) xx = un/(1-un)**2 + sum( [q**n*un/(1-q**n*un)**2 + q**n/un/(1-q**n/un)**2-2*q**n/(1-q**n)**2 for n in range(1,precn) ]) yy = un**2/(1-un)**3 + sum( [q**(2*n)*un**2/(1-q**n*un)**3 - q**n/un/(1-q**n/un)**3+q**n/(1-q**n)**2 for n in range(1,precn) ]) return self.curve(prec=prec)( [xx,yy] ) # From here on all function need that the curve has split multiplicative reduction. def L_invariant(self,prec=20): r""" Returns the *mysterious* `\mathcal{L}`-invariant associated to an elliptic curve with split multiplicative reduction. One instance where this constant appears is in the exceptional case of the `p`-adic Birch and Swinnerton-Dyer conjecture as formulated in [MTT]. See [Col] for a detailed discussion. INPUT: - ``prec`` - the `p`-adic precision, default is 20. REFERENCES: - [MTT] <NAME>, <NAME>, and <NAME>, On `p`-adic analogues of the conjectures of Birch and Swinnerton-Dyer, Inventiones mathematicae 84, (1986), 1-48. - [Col] <NAME>, Invariant `\mathcal{L}` et derivees de valeurs propores de Frobenius, preprint, 2004. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.L_invariant(prec=10) 5^3 + 4*5^4 + 2*5^5 + 2*5^6 + 2*5^7 + 3*5^8 + 5^9 + O(5^10) """ if not self.is_split(): raise RuntimeError, "The curve must have split multiplicative reduction" qE = self.parameter(prec=prec) n = qE.valuation() u = qE/self._p**n # the p-adic logarithm of Iwasawa normalised by log(p) = 0 return log(u)/n def _isomorphism(self,prec=20): r""" Returns the isomorphism between ``self.curve()`` and the given curve in the form of a list ``[u,r,s,t]`` of `p`-adic numbers. For this to exist the given curve has to have split multiplicative reduction over `\QQ_p`. More precisely, if `E` has coordinates `x` and `y` and the Tate curve has coordinates `X`, `Y` with `Y^2 + XY = X^3 + s_4 X +s_6` then `X = u^2 x +r` and `Y = u^3 y +s u^2 x +t`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq._isomorphism(prec=5) [2 + 3*5^2 + 2*5^3 + 4*5^4 + O(5^5), 4 + 3*5 + 4*5^2 + 2*5^3 + O(5^5), 3 + 2*5 + 5^2 + 5^3 + 2*5^4 + O(5^5), 2 + 5 + 3*5^2 + 5^3 + 5^4 + O(5^5)] """ if not self.is_split(): raise RuntimeError, "The curve must have split multiplicative reduction" Csq = self._Csquare(prec=prec+4) C = Csq.sqrt() R = Qp(self._p,prec) C = R(C) s = (C * R(self._E.a1()) -R(1))/R(2) r = (C**2*R(self._E.a2()) +s +s**2)/R(3) t = (C**3*R(self._E.a3()) - r)/R(2) return [C,r,s,t] def _inverse_isomorphism(self,prec=20): r""" Returns the isomorphism between the given curve and ``self.curve()`` in the form of a list ``[u,r,s,t]`` of `p`-adic numbers. For this to exist the given curve has to have split multiplicative reduction over `\QQ_p`. More precisely, if `E` has coordinates `x` and `y` and the Tate curve has coordinates `X`, `Y` with `Y^2 + XY = X^3 + s_4 X +s_6` then `x = u^2 X +r` and `y = u^3 Y +s u^2 X +t`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq._inverse_isomorphism(prec=5) [3 + 2*5 + 3*5^3 + O(5^5), 4 + 2*5 + 4*5^3 + 3*5^4 + O(5^5), 1 + 5 + 4*5^3 + 2*5^4 + O(5^5), 5 + 2*5^2 + 3*5^4 + O(5^5)] """ if not self.is_split(): raise RuntimeError, "The curve must have split multiplicative reduction" vec = self._isomorphism(prec=prec) return [1/vec[0],-vec[1]/vec[0]**2,-vec[2]/vec[0],(vec[1]*vec[2]-vec[3])/vec[0]**3] def lift(self,P, prec = 20): r""" Given a point `P` in the formal group of the elliptic curve `E` with split multiplicative reduction, this produces an element `u` in `\QQ_p^{\times}` mapped to the point `P` by the Tate parametrisation. The algorithm return the unique such element in `1+p\ZZ_p`. INPUT: - ``P`` - a point on the elliptic curve. - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(5) sage: P = e([-6,10]) sage: l = eq.lift(12*P, prec=10); l 1 + 4*5 + 5^3 + 5^4 + 4*5^5 + 5^6 + 5^7 + 4*5^8 + 5^9 + O(5^10) Now we map the lift l back and check that it is indeed right.:: sage: eq.parametrisation_onto_original_curve(l) (4*5^-2 + 2*5^-1 + 4*5 + 3*5^3 + 5^4 + 2*5^5 + 4*5^6 + O(5^7) : 2*5^-3 + 5^-1 + 4 + 4*5 + 5^2 + 3*5^3 + 4*5^4 + O(5^6) : 1 + O(5^20)) sage: e5 = e.change_ring(Qp(5,9)) sage: e5(12*P) (4*5^-2 + 2*5^-1 + 4*5 + 3*5^3 + 5^4 + 2*5^5 + 4*5^6 + O(5^7) : 2*5^-3 + 5^-1 + 4 + 4*5 + 5^2 + 3*5^3 + 4*5^4 + O(5^6) : 1 + O(5^9)) """ p = self._p R = Qp(self._p,prec) if not self._E == P.curve(): raise ValueError , "The point must lie on the original curve." if not self.is_split(): raise ValueError, "The curve must have split multiplicative reduction." if P.is_zero(): return R(1) if P[0].valuation(p) >= 0: raise ValueError , "The point must lie in the formal group." 
Eq = self.curve(prec=prec) isom = self._isomorphism(prec=prec) C = isom[0] r = isom[1] s = isom[2] t = isom[3] xx = r + C**2 * P[0] yy = t + s * C**2 * P[0] + C**3 * P[1] try: Pq = Eq([xx,yy]) except StandardError: raise RuntimeError, "Bug : Point %s does not lie on the curve "%[xx,yy] tt = -xx/yy eqhat = Eq.formal() eqlog = eqhat.log(prec + 3) z = eqlog(tt) u = ZZ(1) fac = ZZ(1) for i in range(1,2*prec+1): fac = fac * i u = u + z**i/fac return u def parametrisation_onto_original_curve(self,u,prec=20): r""" Given an element `u` in `\QQ_p^{\times}`, this computes its image on the original curve under the `p`-adic uniformisation of `E`. INPUT: - ``u`` - a non-zero `p`-adic number. - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.parametrisation_onto_original_curve(1+5+5^2+O(5^10)) (4*5^-2 + 4*5^-1 + 4 + 2*5^3 + 3*5^4 + 2*5^6 + O(5^7) : 3*5^-3 + 5^-2 + 4*5^-1 + 1 + 4*5 + 5^2 + 3*5^5 + O(5^6) : 1 + O(5^20)) Here is how one gets a 4-torsion point on `E` over `\QQ_5`:: sage: R = Qp(5,10) sage: i = R(-1).sqrt() sage: T = eq.parametrisation_onto_original_curve(i); T (2 + 3*5 + 4*5^2 + 2*5^3 + 5^4 + 4*5^5 + 2*5^7 + 5^8 + 5^9 + O(5^10) : 3*5 + 5^2 + 5^4 + 3*5^5 + 3*5^7 + 2*5^8 + 4*5^9 + O(5^10) : 1 + O(5^20)) sage: 4*T (0 : 1 + O(5^20) : 0) """ if not self.is_split(): raise ValueError, "The curve must have split multiplicative reduction." P = self.parametrisation_onto_tate_curve(u,prec=20) isom = self._inverse_isomorphism(prec=prec) C = isom[0] r = isom[1] s = isom[2] t = isom[3] xx = r + C**2 * P[0] yy = t + s * C**2 * P[0] + C**3 * P[1] R = Qp(self._p,prec) E_over_Qp = self._E.base_extend(R) return E_over_Qp([xx,yy]) __padic_sigma_square = lambda e,u,prec: (u-1)**2/u* prod([((1-e._q**n*u)*(1-e._q**n/u)/(1-e._q**n)**2)**2 for n in range(1,prec+1)]) # the following functions are rather functions of the global curve than the local curve # we use the same names as for elliptic curves over rationals. 
def padic_height(self,prec=20): r""" Returns the canonical `p`-adic height function on the original curve. INPUT: - ``prec`` - the `p`-adic precision, default is 20. OUTPUT: - A function that can be evaluated on rational points of `E`. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(5) sage: h = eq.padic_height(prec=10) sage: P=e.gens()[0] sage: h(P) 2*5^-1 + 1 + 2*5 + 2*5^2 + 3*5^3 + 3*5^6 + 5^7 + O(5^8) Check that it is a quadratic function:: sage: h(3*P)-3^2*h(P) O(5^8) """ if not self.is_split(): raise NotImplementedError, "The curve must have split multiplicative reduction" p = self._p # we will have to do it properly with David Harvey's _multiply_point(E, R, Q) n = LCM(self._E.tamagawa_numbers()) * (p-1) # this function is a closure, I don't see how to doctest it (PZ) def _height(P,check=True): if check: assert P.curve() == self._E, "the point P must lie on the curve from which the height function was created" Q = n * P cQ = denominator(Q[0]) uQ = self.lift(Q,prec = prec) si = self.__padic_sigma_square(uQ, prec=prec) nn = self._q.valuation() qEu = self._q/p**nn return -(log(si*self._Csquare()/cQ) + log(uQ)**2/log(qEu)) / n**2 return _height def padic_regulator(self,prec=20): r""" Computes the canonical `p`-adic regulator on the extended Mordell-Weil group as in [MTT] (with the correction of [Wer] and sign convention in [SW].) The `p`-adic Birch and Swinnerton-Dyer conjecture predicts that this value appears in the formula for the leading term of the `p`-adic L-function. INPUT: - ``prec`` - the `p`-adic precision, default is 20. REFERENCES: - [MTT] <NAME>, <NAME>, and <NAME>, On `p`-adic analogues of the conjectures of Birch and Swinnerton-Dyer, Inventiones mathematicae 84, (1986), 1-48. - [Wer] <NAME>, Local heights on abelian varieties and rigid analytic unifomization, Doc. Math. 3 (1998), 301-319. - [SW] <NAME> and <NAME>, Computations About Tate-Shafarevich Groups using Iwasawa theory, preprint 2009. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.padic_regulator() 2*5^-1 + 1 + 2*5 + 2*5^2 + 3*5^3 + 3*5^6 + 5^7 + 3*5^9 + 3*5^10 + 3*5^12 + 4*5^13 + 3*5^15 + 2*5^16 + 3*5^18 + 4*5^19 + O(5^20) """ prec = prec + 4 K = Qp(self._p, prec=prec) rank = self._E.rank() if rank == 0: return K(1) if not self.is_split(): raise NotImplementedError, "The p-adic regulator is not implemented for non-split multiplicative reduction." basis = self._E.gens() M = matrix.matrix(K, rank, rank, 0) height = self.padic_height(prec= prec) point_height = [height(P) for P in basis] for i in range(rank): for j in range(i+1, rank): M[i, j] = M[j, i] = (- point_height[i] - point_height[j] + height(basis[i] + basis[j]))/2 for i in range(rank): M[i,i] = point_height[i] return M.determinant()
en
0.612424
Tate's parametrisation of `p`-adic curves with multiplicative reduction Let `E` be an elliptic curve defined over the `p`-adic numbers `\QQ_p`. Suppose that `E` has multiplicative reduction, i.e. that the `j`-invariant of `E` has negative valuation, say `n`. Then there exists a parameter `q` in `\ZZ_p` of valuation `n` such that the points of `E` defined over the algebraic closure `\bar{\QQ}_p` are in bijection with `\bar{\QQ}_p^{\times}\,/\, q^{\ZZ}`. More precisely there exists the series `s_4(q)` and `s_6(q)` such that the `y^2+x y = x^3 + s_4(q) x+s_6(q)` curve is isomorphic to `E` over `\bar{\QQ}_p` (or over `\QQ_p` if the reduction is *split* multiplicative). There is `p`-adic analytic map from `\bar{\QQ}^{\times}_p` to this curve with kernel `q^{\ZZ}`. Points of good reduction correspond to points of valuation `0` in `\bar{\QQ}^{\times}_p`. See chapter V of [Sil2] for more details. REFERENCES : - [Sil2] Silverman Joseph, Advanced Topics in the Arithmetic of Elliptic Curves, GTM 151, Springer 1994. AUTHORS: - <NAME> (23/05/2007): first version - <NAME> (2007-05-29): added some examples; editing. - <NAME> (04/09): reformatted docstrings. ###################################################################### # Copyright (C) 2007 <NAME> # # Distributed under the terms of the GNU General Public License (GPL) # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # The full text of the GPL is available at: # # http://www.gnu.org/licenses/ ###################################################################### Tate's `p`-adic uniformisation of an elliptic curve with multiplicative reduction. .. note:: Some of the methods of this Tate curve only work when the reduction is split multiplicative over `\QQ_p`. 
EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(5); eq 5-adic Tate curve associated to the Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field sage: eq == loads(dumps(eq)) True REFERENCES : - [Sil2] <NAME>, Advanced Topics in the Arithmetic of Elliptic Curves, GTM 151, Springer 1994. INPUT: - ``E`` - an elliptic curve over the rational numbers - ``p`` - a prime where `E` has multiplicative reduction, i.e., such that `j(E)` has negative valuation. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(2); eq 2-adic Tate curve associated to the Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field Compare self and other. TESTS:: sage: E = EllipticCurve('35a') sage: eq5 = E.tate_curve(5) sage: eq7 = E.tate_curve(7) sage: eq7 == eq7 True sage: eq7 == eq5 False Return print representation. EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(2) sage: eq._repr_() '2-adic Tate curve associated to the Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field' Returns the elliptic curve the Tate curve was constructed from. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.original_curve() Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field Returns the residual characteristic `p`. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.original_curve() Elliptic Curve defined by y^2 + x*y + y = x^3 - 33*x + 68 over Rational Field sage: eq.prime() 5 Returns the Tate parameter `q` such that the curve is isomorphic over the algebraic closure of `\QQ_p` to the curve `\QQ_p^{\times}/q^{\ZZ}`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.parameter(prec=5) 3*5^3 + 3*5^4 + 2*5^5 + 2*5^6 + 3*5^7 + O(5^8) Returns the `p`-adic elliptic curve of the form `y^2+x y = x^3 + s_4 x+s_6`. 
This curve with split multiplicative reduction is isomorphic to the given curve over the algebraic closure of `\QQ_p`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.curve(prec=5) Elliptic Curve defined by y^2 + (1+O(5^5))*x*y = x^3 + (2*5^4+5^5+2*5^6+5^7+3*5^8+O(5^9))*x + (2*5^3+5^4+2*5^5+5^7+O(5^8)) over 5-adic Field with capped relative precision 5 Returns the square of the constant `C` such that the canonical Neron differential `\omega` and the canonical differential `\frac{du}{u}` on `\QQ^{\times}/q^{\ZZ}` are linked by `\omega = C \frac{du}{u}`. This constant is only a square in `\QQ_p` if the curve has split multiplicative reduction. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq._Csquare(prec=5) 4 + 2*5^2 + 2*5^4 + O(5^5) Returns the value of the `p`-adic Eisenstein series of weight 2 evaluated on the elliptic curve having split multiplicative reduction. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.E2(prec=10) 4 + 2*5^2 + 2*5^3 + 5^4 + 2*5^5 + 5^7 + 5^8 + 2*5^9 + O(5^10) sage: T = EllipticCurve('14').tate_curve(7) sage: T.E2(30) 2 + 4*7 + 7^2 + 3*7^3 + 6*7^4 + 5*7^5 + 2*7^6 + 7^7 + 5*7^8 + 6*7^9 + 5*7^10 + 2*7^11 + 6*7^12 + 4*7^13 + 3*7^15 + 5*7^16 + 4*7^17 + 4*7^18 + 2*7^20 + 7^21 + 5*7^22 + 4*7^23 + 4*7^24 + 3*7^25 + 6*7^26 + 3*7^27 + 6*7^28 + O(7^30) Returns True if the given elliptic curve has split multiplicative reduction. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.is_split() True sage: eq = EllipticCurve('37a1').tate_curve(37) sage: eq.is_split() False Given an element `u` in `\QQ_p^{\times}`, this computes its image on the Tate curve under the `p`-adic uniformisation of `E`. INPUT: - ``u`` - a non-zero `p`-adic number. - ``prec`` - the `p`-adic precision, default is 20. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.parametrisation_onto_tate_curve(1+5+5^2+O(5^10)) (5^-2 + 4*5^-1 + 1 + 2*5 + 3*5^2 + 2*5^5 + 3*5^6 + O(5^7) : 4*5^-3 + 2*5^-1 + 4 + 2*5 + 3*5^4 + 2*5^5 + O(5^6) : 1 + O(5^20)) # formulas in Silverman II (Advanced Topics in the Arithmetic of Elliptic curves, p. 425) # From here on all function need that the curve has split multiplicative reduction. Returns the *mysterious* `\mathcal{L}`-invariant associated to an elliptic curve with split multiplicative reduction. One instance where this constant appears is in the exceptional case of the `p`-adic Birch and Swinnerton-Dyer conjecture as formulated in [MTT]. See [Col] for a detailed discussion. INPUT: - ``prec`` - the `p`-adic precision, default is 20. REFERENCES: - [MTT] <NAME>, <NAME>, and <NAME>, On `p`-adic analogues of the conjectures of Birch and Swinnerton-Dyer, Inventiones mathematicae 84, (1986), 1-48. - [Col] <NAME>, Invariant `\mathcal{L}` et derivees de valeurs propores de Frobenius, preprint, 2004. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.L_invariant(prec=10) 5^3 + 4*5^4 + 2*5^5 + 2*5^6 + 2*5^7 + 3*5^8 + 5^9 + O(5^10) # the p-adic logarithm of Iwasawa normalised by log(p) = 0 Returns the isomorphism between ``self.curve()`` and the given curve in the form of a list ``[u,r,s,t]`` of `p`-adic numbers. For this to exist the given curve has to have split multiplicative reduction over `\QQ_p`. More precisely, if `E` has coordinates `x` and `y` and the Tate curve has coordinates `X`, `Y` with `Y^2 + XY = X^3 + s_4 X +s_6` then `X = u^2 x +r` and `Y = u^3 y +s u^2 x +t`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. 
EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq._isomorphism(prec=5) [2 + 3*5^2 + 2*5^3 + 4*5^4 + O(5^5), 4 + 3*5 + 4*5^2 + 2*5^3 + O(5^5), 3 + 2*5 + 5^2 + 5^3 + 2*5^4 + O(5^5), 2 + 5 + 3*5^2 + 5^3 + 5^4 + O(5^5)] Returns the isomorphism between the given curve and ``self.curve()`` in the form of a list ``[u,r,s,t]`` of `p`-adic numbers. For this to exist the given curve has to have split multiplicative reduction over `\QQ_p`. More precisely, if `E` has coordinates `x` and `y` and the Tate curve has coordinates `X`, `Y` with `Y^2 + XY = X^3 + s_4 X +s_6` then `x = u^2 X +r` and `y = u^3 Y +s u^2 X +t`. INPUT: - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq._inverse_isomorphism(prec=5) [3 + 2*5 + 3*5^3 + O(5^5), 4 + 2*5 + 4*5^3 + 3*5^4 + O(5^5), 1 + 5 + 4*5^3 + 2*5^4 + O(5^5), 5 + 2*5^2 + 3*5^4 + O(5^5)] Given a point `P` in the formal group of the elliptic curve `E` with split multiplicative reduction, this produces an element `u` in `\QQ_p^{\times}` mapped to the point `P` by the Tate parametrisation. The algorithm return the unique such element in `1+p\ZZ_p`. INPUT: - ``P`` - a point on the elliptic curve. - ``prec`` - the `p`-adic precision, default is 20. 
EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(5) sage: P = e([-6,10]) sage: l = eq.lift(12*P, prec=10); l 1 + 4*5 + 5^3 + 5^4 + 4*5^5 + 5^6 + 5^7 + 4*5^8 + 5^9 + O(5^10) Now we map the lift l back and check that it is indeed right.:: sage: eq.parametrisation_onto_original_curve(l) (4*5^-2 + 2*5^-1 + 4*5 + 3*5^3 + 5^4 + 2*5^5 + 4*5^6 + O(5^7) : 2*5^-3 + 5^-1 + 4 + 4*5 + 5^2 + 3*5^3 + 4*5^4 + O(5^6) : 1 + O(5^20)) sage: e5 = e.change_ring(Qp(5,9)) sage: e5(12*P) (4*5^-2 + 2*5^-1 + 4*5 + 3*5^3 + 5^4 + 2*5^5 + 4*5^6 + O(5^7) : 2*5^-3 + 5^-1 + 4 + 4*5 + 5^2 + 3*5^3 + 4*5^4 + O(5^6) : 1 + O(5^9)) Given an element `u` in `\QQ_p^{\times}`, this computes its image on the original curve under the `p`-adic uniformisation of `E`. INPUT: - ``u`` - a non-zero `p`-adic number. - ``prec`` - the `p`-adic precision, default is 20. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.parametrisation_onto_original_curve(1+5+5^2+O(5^10)) (4*5^-2 + 4*5^-1 + 4 + 2*5^3 + 3*5^4 + 2*5^6 + O(5^7) : 3*5^-3 + 5^-2 + 4*5^-1 + 1 + 4*5 + 5^2 + 3*5^5 + O(5^6) : 1 + O(5^20)) Here is how one gets a 4-torsion point on `E` over `\QQ_5`:: sage: R = Qp(5,10) sage: i = R(-1).sqrt() sage: T = eq.parametrisation_onto_original_curve(i); T (2 + 3*5 + 4*5^2 + 2*5^3 + 5^4 + 4*5^5 + 2*5^7 + 5^8 + 5^9 + O(5^10) : 3*5 + 5^2 + 5^4 + 3*5^5 + 3*5^7 + 2*5^8 + 4*5^9 + O(5^10) : 1 + O(5^20)) sage: 4*T (0 : 1 + O(5^20) : 0) # the following functions are rather functions of the global curve than the local curve # we use the same names as for elliptic curves over rationals. Returns the canonical `p`-adic height function on the original curve. INPUT: - ``prec`` - the `p`-adic precision, default is 20. OUTPUT: - A function that can be evaluated on rational points of `E`. 
EXAMPLES:: sage: e = EllipticCurve('130a1') sage: eq = e.tate_curve(5) sage: h = eq.padic_height(prec=10) sage: P=e.gens()[0] sage: h(P) 2*5^-1 + 1 + 2*5 + 2*5^2 + 3*5^3 + 3*5^6 + 5^7 + O(5^8) Check that it is a quadratic function:: sage: h(3*P)-3^2*h(P) O(5^8) # we will have to do it properly with David Harvey's _multiply_point(E, R, Q) # this function is a closure, I don't see how to doctest it (PZ) Computes the canonical `p`-adic regulator on the extended Mordell-Weil group as in [MTT] (with the correction of [Wer] and sign convention in [SW].) The `p`-adic Birch and Swinnerton-Dyer conjecture predicts that this value appears in the formula for the leading term of the `p`-adic L-function. INPUT: - ``prec`` - the `p`-adic precision, default is 20. REFERENCES: - [MTT] <NAME>, <NAME>, and <NAME>, On `p`-adic analogues of the conjectures of Birch and Swinnerton-Dyer, Inventiones mathematicae 84, (1986), 1-48. - [Wer] <NAME>, Local heights on abelian varieties and rigid analytic unifomization, Doc. Math. 3 (1998), 301-319. - [SW] <NAME> and <NAME>, Computations About Tate-Shafarevich Groups using Iwasawa theory, preprint 2009. EXAMPLES:: sage: eq = EllipticCurve('130a1').tate_curve(5) sage: eq.padic_regulator() 2*5^-1 + 1 + 2*5 + 2*5^2 + 3*5^3 + 3*5^6 + 5^7 + 3*5^9 + 3*5^10 + 3*5^12 + 4*5^13 + 3*5^15 + 2*5^16 + 3*5^18 + 4*5^19 + O(5^20)
2.440238
2
tvrenamer/constants.py
shad7/tvrenamer
1
6629794
"""Application constants."""

# Lifecycle state markers for a media file moving through the rename
# pipeline. Each processing stage has a pre-state ("-ing") set when the
# stage starts and a post-state ("-ed") set when it completes, ending in
# either DONE or FAILED.
# NOTE(review): presumably consumed as status values by the pipeline
# stages elsewhere in the package — confirm against callers.
INIT = 'initialized'
PREPARSE = 'parsing'
POSTPARSE = 'parsed'
PREENHANCE = 'enhancing'
POSTENHANCE = 'enhanced'
PREFORMAT = 'formatting'
POSTFORMAT = 'formatted'
PRENAME = 'renaming'
POSTNAME = 'renamed'
DONE = 'finished'
FAILED = 'failed'
"""Application constants.""" INIT = 'initialized' PREPARSE = 'parsing' POSTPARSE = 'parsed' PREENHANCE = 'enhancing' POSTENHANCE = 'enhanced' PREFORMAT = 'formatting' POSTFORMAT = 'formatted' PRENAME = 'renaming' POSTNAME = 'renamed' DONE = 'finished' FAILED = 'failed'
en
0.651379
Application constants.
1.243297
1
MIDAS/CountMinSketch.py
kimMichael/MIDAS.Python
0
6629795
# ------------------------------------------------------------------------------
# Copyright 2020 <NAME> (@liurui39660)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------

from random import randint
from typing import List


class CountMinSketch:
	"""A count-min sketch: ``numRow`` independent hash rows over ``numColumn``
	counters each, stored in one flat list. Queries take the minimum over the
	per-row counters, yielding an approximate, never-under-estimating count.
	"""

	def __init__(self, numRow: int, numColumn: int) -> None:
		super().__init__()
		self.r: int = numRow
		self.c: int = numColumn
		self.m: int = 104729  # The same magic number in C++ implementation
		self.lenData: int = self.r * self.c
		# One affine hash (a * p1 + p2) % c per row; parameters are drawn
		# once here and stay fixed for the sketch's lifetime.
		self.param1 = [randint(0, 0x7fff) + 1 for _ in range(self.r)]
		self.param2 = [randint(0, 0x7fff) for _ in range(self.r)]
		self.data = [0.] * self.lenData

	def ClearAll(self, with_: float = 0) -> None:
		"""Reset every counter to ``with_`` (list rebuild beats a loop)."""
		self.data = [with_] * self.lenData

	def MultiplyAll(self, by: float) -> None:
		"""Scale every counter in place by ``by`` (used for decay)."""
		cells = self.data  # bind once: LOAD_FAST inside the hot loop
		for pos in range(self.lenData):
			cells[pos] *= by

	def Hash(self, indexOut: List[int], a: int, b: int = 0) -> None:
		"""Fill ``indexOut`` with one flat-array slot per row for key (a, b)."""
		mixed = a + self.m * b  # combine the two key parts once
		for row in range(self.r):
			pos = (mixed * self.param1[row] + self.param2[row]) % self.c
			if pos < 0:  # defensive; Python's % with positive c is already >= 0
				pos += self.c
			indexOut[row] = pos + row * self.c

	def __call__(self, index: List[int]) -> float:
		"""Return the sketch estimate: the minimum counter over ``index``."""
		return min(self.data[slot] for slot in index)

	def Assign(self, index: List[int], with_: float) -> float:
		"""Overwrite the counters at ``index`` with ``with_`` and return it."""
		for slot in index:
			self.data[slot] = with_
		return with_

	def Add(self, index: List[int], by: float = 1) -> None:
		"""Increment the counters at ``index`` by ``by``."""
		for slot in index:
			self.data[slot] += by
# ------------------------------------------------------------------------------ # Copyright 2020 <NAME> (@liurui39660) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ from random import randint from typing import List class CountMinSketch: def __init__(self, numRow: int, numColumn: int) -> None: super().__init__() self.r: int = numRow self.c: int = numColumn self.m: int = 104729 # The same magic number in C++ implementation self.lenData: int = self.r * self.c self.param1 = [randint(0, 0x7fff) + 1 for _ in range(self.r)] self.param2 = [randint(0, 0x7fff) for _ in range(self.r)] self.data = [0.] * self.lenData def ClearAll(self, with_: float = 0) -> None: self.data = [with_] * self.lenData # Faster than for loop def MultiplyAll(self, by: float) -> None: for i in range(self.lenData): # Faster than list(map(...)) self.data[i] *= by def Hash(self, indexOut: List[int], a: int, b: int = 0) -> None: for i in range(self.r): # Faster than using two maps indexOut[i] = ((a + self.m * b) * self.param1[i] + self.param2[i]) % self.c indexOut[i] += i * self.c + (self.c if indexOut[i] < 0 else 0) def __call__(self, index: List[int]) -> float: return min(map(lambda i: self.data[i], index)) # Faster than for loop def Assign(self, index: List[int], with_: float) -> float: for i in index: self.data[i] = with_ return with_ def Add(self, index: List[int], by: float = 1) -> None: for i in index: self.data[i] += by
en
0.721812
# ------------------------------------------------------------------------------ # Copyright 2020 <NAME> (@liurui39660) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ # The same magic number in C++ implementation # Faster than for loop # Faster than list(map(...)) # Faster than using two maps # Faster than for loop
2.987072
3
gal/views.py
BrilliantGrant/unsplash
0
6629796
from django.shortcuts import render,HttpResponse
from .models import Images,Category
from django.template.context_processors import request

# Create your views here.


def index(request):
    """Render the landing page with all stored images."""
    gallery = Images.get_images()
    return render(request, 'index.html', {"image": gallery})


def image(request, image_id):
    """Render the detail page for the image with primary key ``image_id``."""
    selected = Images.objects.get(id=image_id)
    return render(request, 'image.html', {'image': selected})


def search_results(request):
    """Render images matching the ``category`` GET parameter, or a hint
    message when the parameter is absent or empty."""
    search_term = request.GET.get('category')
    if search_term:
        matches = Images.search_by_category(search_term)
        context = {"message": f"{search_term}", "categories": matches}
    else:
        context = {"message": 'You haven\'t searched for any category.'}
    return render(request, 'search.html', context)
from django.shortcuts import render,HttpResponse from .models import Images,Category from django.template.context_processors import request # Create your views here. def index(request): image = Images.get_images() return render(request,'index.html',{"image":image}) def image(request, image_id): image = Images.objects.get(id=image_id) return render(request, 'image.html', {'image':image}) def search_results(request): if 'category' in request.GET and request.GET['category']: search_term = request.GET.get('category') searched_category = Images.search_by_category(search_term) message = f"{search_term}" return render(request, 'search.html', {"message":message, "categories":searched_category}) else: message = 'You haven\'t searched for any category.' return render(request, 'search.html', {"message":message})
en
0.968116
# Create your views here.
2.179011
2
testkb/m.py
happyseayou/tello_the_force
1
6629797
<reponame>happyseayou/tello_the_force<gh_stars>1-10 import os import cv2 import gc import time from multiprocessing import Process, Manager # 向共享缓冲栈中写入数据: def write(stack, cam, top: int) -> None: print('Process to write: %s' % os.getpid()) cap = cv2.VideoCapture(cam) while True: _, img = cap.read() if _: stack.append(img) # 每到一定容量清空一次缓冲栈 # 利用gc库,手动清理内存垃圾,防止内存溢出 if len(stack) >= top: del stack[:] gc.collect() # 在缓冲栈中读取数据: def read(stack) -> None: #提醒返回值是一个None print('Process to read: %s' % os.getpid()) index = 0 fourcc = cv2.VideoWriter_fourcc(*'avc1') #MPEG-4.2q out = cv2.VideoWriter('video_out.mp4',fourcc , 25, (640, 480)) start_time = time.time() x = 1 # displays the frame rate every 1 second counter = 0 print("开始逐帧读取") while True: # print("正在读取第%d帧:" %index)s if len(stack) >= 10: frame = stack.pop() # 逐帧保存为图片 # resize_frame = cv2.resize(frame, (720, 480), interpolation=cv2.INTER_AREA) # cv2.imwrite("frame" + "%03d.jpg" % index,resize_frame ) index = index+1 #直接保存视频 out.write(frame) cv2.imshow("img", frame) #计算fps counter += 1 if (time.time() - start_time) > x: print("FPS: ", counter / (time.time() - start_time)) counter = 0 start_time = time.time() key = cv2.waitKey(1) & 0xFF if key == ord('q'): break else: continue out.release() cv2.destroyAllWindows() if __name__ == '__main__': # 父进程创建缓冲栈,并传给各个子进程: q = Manager().list() pw = Process(target=write, args=(q, 0, 100)) #海康威视视频流地址 pr = Process(target=read, args=(q,)) pw.start() pr.start() pr.join() pw.terminate()
import os import cv2 import gc import time from multiprocessing import Process, Manager # 向共享缓冲栈中写入数据: def write(stack, cam, top: int) -> None: print('Process to write: %s' % os.getpid()) cap = cv2.VideoCapture(cam) while True: _, img = cap.read() if _: stack.append(img) # 每到一定容量清空一次缓冲栈 # 利用gc库,手动清理内存垃圾,防止内存溢出 if len(stack) >= top: del stack[:] gc.collect() # 在缓冲栈中读取数据: def read(stack) -> None: #提醒返回值是一个None print('Process to read: %s' % os.getpid()) index = 0 fourcc = cv2.VideoWriter_fourcc(*'avc1') #MPEG-4.2q out = cv2.VideoWriter('video_out.mp4',fourcc , 25, (640, 480)) start_time = time.time() x = 1 # displays the frame rate every 1 second counter = 0 print("开始逐帧读取") while True: # print("正在读取第%d帧:" %index)s if len(stack) >= 10: frame = stack.pop() # 逐帧保存为图片 # resize_frame = cv2.resize(frame, (720, 480), interpolation=cv2.INTER_AREA) # cv2.imwrite("frame" + "%03d.jpg" % index,resize_frame ) index = index+1 #直接保存视频 out.write(frame) cv2.imshow("img", frame) #计算fps counter += 1 if (time.time() - start_time) > x: print("FPS: ", counter / (time.time() - start_time)) counter = 0 start_time = time.time() key = cv2.waitKey(1) & 0xFF if key == ord('q'): break else: continue out.release() cv2.destroyAllWindows() if __name__ == '__main__': # 父进程创建缓冲栈,并传给各个子进程: q = Manager().list() pw = Process(target=write, args=(q, 0, 100)) #海康威视视频流地址 pr = Process(target=read, args=(q,)) pw.start() pr.start() pr.join() pw.terminate()
zh
0.484877
# 向共享缓冲栈中写入数据: # 每到一定容量清空一次缓冲栈 # 利用gc库,手动清理内存垃圾,防止内存溢出 # 在缓冲栈中读取数据: #提醒返回值是一个None #MPEG-4.2q # displays the frame rate every 1 second # print("正在读取第%d帧:" %index)s # 逐帧保存为图片 # resize_frame = cv2.resize(frame, (720, 480), interpolation=cv2.INTER_AREA) # cv2.imwrite("frame" + "%03d.jpg" % index,resize_frame ) #直接保存视频 #计算fps # 父进程创建缓冲栈,并传给各个子进程: #海康威视视频流地址
2.51527
3
ml_project/src/train_pipeline.py
made-ml-in-prod-2021/liliyamakhmutova-
0
6629798
import json
import logging
import logging.config
import yaml
import sys
import click
import pandas as pd

from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression

from src.data import read_data, split_train_val_data
from src.enities.train_pipeline_params import (
    TrainingPipelineParams,
    read_training_pipeline_params,
)
from src.enities.model_params import (
    read_model_logistic_regression_params,
    read_model_decision_tree_classifier_params,
)
from src.features import make_features
from src.features.build_features import extract_target, build_transformer, drop_columns
from src.models import (
    train_model,
    serialize_model,
    predict_model,
    evaluate_model,
    SklearnClassifierModel,
)

# Below this many training rows a warning is logged (training still proceeds).
NOT_ENOUGH_DATA_THRESHOLD = 50

APPLICATION_NAME = "ml_project"
APPLICATION_NAME_WARNING = "ml_project_warning"
DEFAULT_LOGGING_CONFIG_PATH = "configs/logging_config.yml"

logger = logging.getLogger(APPLICATION_NAME)
warning_logger = logging.getLogger(APPLICATION_NAME_WARNING)


def train_pipeline(training_pipeline_params: TrainingPipelineParams, model: SklearnClassifierModel):
    """Run the full train / evaluate / serialize pipeline.

    Reads the dataset, drops configured columns, splits into train and
    validation sets, fits the feature transformer on the training split,
    trains ``model``, evaluates it on the validation split, writes the
    metrics as JSON to ``training_pipeline_params.metric_path`` and
    serializes the trained model.

    :param training_pipeline_params: paths, splitting and feature config.
    :param model: an unfitted sklearn-compatible classifier to train.
    :return: tuple ``(path_to_model, metrics)``.
    """
    logger.info(f"start train pipeline with params {training_pipeline_params}")
    data = read_data(training_pipeline_params.input_data_path)
    logger.info(f"data.shape is {data.shape}")
    data = drop_columns(data, training_pipeline_params.feature_params)
    logger.info(f"data.shape after dropping some columns is {data.shape}")

    train_df, val_df = split_train_val_data(
        data, training_pipeline_params.splitting_params
    )
    logger.info(f"train_df.shape is {train_df.shape}")
    logger.info(f"val_df.shape is {val_df.shape}")
    if train_df.shape[0] < NOT_ENOUGH_DATA_THRESHOLD:
        # NOTE(review): message should read "Not enough data"; left
        # unchanged here because it is a runtime string.
        msg = "No enough data to build good model"
        logger.warning(msg)
        warning_logger.warning(msg)

    # The transformer is fitted on the training split only and then reused
    # for the validation split, so no information leaks from validation data.
    transformer = build_transformer(training_pipeline_params.feature_params)
    transformer.fit(train_df)
    train_features = make_features(transformer, train_df)
    train_target = extract_target(train_df, training_pipeline_params.feature_params)

    logger.info(f"train_features.shape is {train_features.shape}")
    model = train_model(
        train_features, train_target, model
    )

    val_features = make_features(transformer, val_df)
    val_target = extract_target(val_df, training_pipeline_params.feature_params)

    logger.info(f"val_features.shape is {val_features.shape}")
    predicts = predict_model(
        model,
        val_features,
        training_pipeline_params.feature_params.use_log_trick,
    )

    metrics = evaluate_model(
        predicts,
        val_target,
        use_log_trick=training_pipeline_params.feature_params.use_log_trick,
    )

    with open(training_pipeline_params.metric_path, "w") as metric_file:
        json.dump(metrics, metric_file)
    logger.info(f"metrics is {metrics}")

    path_to_model = serialize_model(model, training_pipeline_params.output_model_path)

    return path_to_model, metrics


@click.command(name="train_pipeline")
@click.argument("config_path")
def train_pipeline_command(config_path: str):
    """CLI entry point: read the pipeline config at ``config_path``, build
    the configured model (LogisticRegression or DecisionTreeClassifier)
    and run the training pipeline with it.

    :raises NotImplementedError: for any other ``model_type`` in the config.
    """
    params = read_training_pipeline_params(config_path)
    model_type = params.train_params.model_type
    logger.info(f"model is {model_type}")
    if model_type == "LogisticRegression":
        model_params = read_model_logistic_regression_params(config_path)
        model = LogisticRegression(
            C=model_params.C,
            solver=model_params.solver,
            max_iter=model_params.max_iter,
            random_state=params.train_params.random_state
        )
    elif model_type == "DecisionTreeClassifier":
        model_params = read_model_decision_tree_classifier_params(config_path)
        model = DecisionTreeClassifier(
            criterion=model_params.criterion,
            max_depth=model_params.max_depth,
            random_state=params.train_params.random_state)
    else:
        raise NotImplementedError()
    train_pipeline(params, model)


def setup_logging():
    """Configure logging from the YAML file at DEFAULT_LOGGING_CONFIG_PATH."""
    with open(DEFAULT_LOGGING_CONFIG_PATH) as config_fin:
        logging.config.dictConfig(yaml.safe_load(config_fin))


if __name__ == "__main__":
    setup_logging()
    train_pipeline_command()
<filename>ml_project/src/train_pipeline.py import json import logging import logging.config import yaml import sys import click import pandas as pd from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from src.data import read_data, split_train_val_data from src.enities.train_pipeline_params import ( TrainingPipelineParams, read_training_pipeline_params, ) from src.enities.model_params import ( read_model_logistic_regression_params, read_model_decision_tree_classifier_params, ) from src.features import make_features from src.features.build_features import extract_target, build_transformer, drop_columns from src.models import ( train_model, serialize_model, predict_model, evaluate_model, SklearnClassifierModel, ) NOT_ENOUGH_DATA_THRESHOLD = 50 APPLICATION_NAME = "ml_project" APPLICATION_NAME_WARNING = "ml_project_warning" DEFAULT_LOGGING_CONFIG_PATH = "configs/logging_config.yml" logger = logging.getLogger(APPLICATION_NAME) warning_logger = logging.getLogger(APPLICATION_NAME_WARNING) def train_pipeline(training_pipeline_params: TrainingPipelineParams, model: SklearnClassifierModel): logger.info(f"start train pipeline with params {training_pipeline_params}") data = read_data(training_pipeline_params.input_data_path) logger.info(f"data.shape is {data.shape}") data = drop_columns(data, training_pipeline_params.feature_params) logger.info(f"data.shape after dropping some columns is {data.shape}") train_df, val_df = split_train_val_data( data, training_pipeline_params.splitting_params ) logger.info(f"train_df.shape is {train_df.shape}") logger.info(f"val_df.shape is {val_df.shape}") if train_df.shape[0] < NOT_ENOUGH_DATA_THRESHOLD: msg = "No enough data to build good model" logger.warning(msg) warning_logger.warning(msg) transformer = build_transformer(training_pipeline_params.feature_params) transformer.fit(train_df) train_features = make_features(transformer, train_df) train_target = extract_target(train_df, 
training_pipeline_params.feature_params) logger.info(f"train_features.shape is {train_features.shape}") model = train_model( train_features, train_target, model ) val_features = make_features(transformer, val_df) val_target = extract_target(val_df, training_pipeline_params.feature_params) logger.info(f"val_features.shape is {val_features.shape}") predicts = predict_model( model, val_features, training_pipeline_params.feature_params.use_log_trick, ) metrics = evaluate_model( predicts, val_target, use_log_trick=training_pipeline_params.feature_params.use_log_trick, ) with open(training_pipeline_params.metric_path, "w") as metric_file: json.dump(metrics, metric_file) logger.info(f"metrics is {metrics}") path_to_model = serialize_model(model, training_pipeline_params.output_model_path) return path_to_model, metrics @click.command(name="train_pipeline") @click.argument("config_path") def train_pipeline_command(config_path: str): params = read_training_pipeline_params(config_path) model_type = params.train_params.model_type logger.info(f"model is {model_type}") if model_type == "LogisticRegression": model_params = read_model_logistic_regression_params(config_path) model = LogisticRegression( C=model_params.C, solver=model_params.solver, max_iter=model_params.max_iter, random_state=params.train_params.random_state ) elif model_type == "DecisionTreeClassifier": model_params = read_model_decision_tree_classifier_params(config_path) model = DecisionTreeClassifier( criterion=model_params.criterion, max_depth=model_params.max_depth, random_state=params.train_params.random_state) else: raise NotImplementedError() train_pipeline(params, model) def setup_logging(): with open(DEFAULT_LOGGING_CONFIG_PATH) as config_fin: logging.config.dictConfig(yaml.safe_load(config_fin)) if __name__ == "__main__": setup_logging() train_pipeline_command()
none
1
3.076148
3
gmshModel/Model/GenericModel.py
gawelk/F3DAS
45
6629799
<reponame>gawelk/F3DAS ################################################################################ # CLASS DEFINITION FOR MESHING MODELS GENERATED USING THE GMSH-PYTHON-API # ################################################################################ # Within this file, the generic Model class is defined. It is the base class # for other, more specific classes which aim to mesh models using the Gmsh- # Python-API. In addition to the methods defined within the Gmsh-Python-API, # this class provides methods for all basic steps of a model generation using # Gmsh: some of these methods are only placeholders here and - if required - # have to be specified/overwritten for the more specialized models. ########################### # Load required libraries # ########################### # Standard Python libraries import os # os for file handling (split extensions from file) import inspect # inspect to search for classes in modules import datetime as dt # datetime for time stamps import copy as cp # copy for deepcopies of arrays import tempfile as tf # tempfile for generation of temprory files and folders import numpy as np # numpy for array computations import pickle # pickle for saving and loading of gmshModels import logging # logging for log messages logger=logging.getLogger(__name__) # set logger # additional program libraries import gmsh # Gmsh Python-API import meshio # meshio for mesh file format conversions # self-defined class definitions and modules from ..Geometry import GeometricObjects as geomObj # classes for implemented geometric objects from ..Visualization.GeometryVisualization import GeometryVisualization, PYTHONOCC_AVAILABLE # class for geometry visualization from ..Visualization.MeshVisualization import MeshVisualization # class for mesh visualization from ..MeshExport import FeapExport ############################# # Set configuration options # ############################# SUPPORTED_GEOMETRY_FORMATS=[".brep", ".stp", ".step"] # set supported 
geometry formats SUPPORTED_MESH_FORMATS=list(meshio.extension_to_filetype.keys()) # set supported mesh file formats ############################# # Define GenericModel class # ############################# class GenericModel: """Generic class for meshing models generated using the Gmsh-Python-API This class provides the basic mesh generation framework for Gmsh. It implements the methods for: (1) Setting up a geometry using basic geometric entities and boolean operations (2) Adding the geometric objects to Gmsh, performing the boolean operations, defining physical groups (3) creating a mesh with user-defined refinement fields (4) saving and visualizing the mesh Some of the methods used within the framework are only defined as placeholder methods here and have to be specified in detail within the child classes. Attributes: ----------- dimension: int dimension of the model instance modelName: string name of the Gmsh model and default name for all resulting files gmshConfigChanges: dict dictionary for user updates of the default Gmsh configuration geometricObjects: list list containing the instances of geometric objects used for the model geometry creation groups: dict dictionary with group information for the model entities booleanOperations: list list with dictionaries defining the individual boolean operations to perform for the model generation physicalGroups: list list with dictionary defining which Gmsh entities are defined as physical groups (e.g. 
different materials) refinementFields: list list of dictionaries defining the refinement fields that have to be added to the Gmsh model backgroundField: int number of the field that has to be used as the background field/mesh for the mesh generation """ ######################### # Initialization method # ######################### def __init__(self,dimension=None,gmshConfigChanges={}): """Initialization method of a generic GmshModel object Parameters: ----------- dimension: int dimension of the model instance gmshConfigChanges: dict dictionary for user updates of the default Gmsh configuration """ # set unique model name self.modelName="Model_"+dt.datetime.now().strftime("%Y%m%d_%H%M%S%f") # use time stamp of initialization (with microseconds) for unique model name # set default file extensions depending on the savin method self._defaultFileExts={ "Geometry": ".brep", # use ".brep" as default extension for saving the geometry "Mesh": ".msh", # use ".msh" as default extension for saving meshes "Model": ".gmshModel", # use ".gmshModel" as default extension for saving models "Misc": ".txt" # use ".txt" as default extension for miscellaneous information } # initialize Gmsh-Python-API self.gmshConfigChanges={ # default Gmsh configuration changes "General.Terminal": 0, # deactivate console output by default (only activated for mesh generation) "Geometry.OCCBoundsUseStl": 1, # use more accurate computation of bounding boxes (slower but advantegous for periodicity constraints) "Geometry.Tolerance": 1e-12 # adjust geometric tolerance to be a little more precise then default (1e-8) } self.gmshAPI=self.initializeGmsh(gmshConfigChanges) # this assignment facilitates the usage of all methods provided by the gmsh.model class # initialize attributes that all instances of GenericModel should have self.dimension=dimension # set (highest) dimension of the model self.geometricObjects=[] # initialze empty list of geomtric objects (used for model generation) self.groups={} # initialize 
empty dictionary of groups (used for boolean operations adn physical groups) self.booleanOperations=[] # initialize empty list of defined boolean operations (used to generate the model from basic geometrical objects) self.physicalGroups=[] # initialize empty list of defined physical groups (used to identify materials and boundaries within the mesh) self.refinementFields=[] # initialize empty list of refinement fields (used to control the mesh sizes) self.backgroundField=None # initialize background field for the meshing algorithm (used to control the mesh sizes) ################################################################################ # MAIN METHODS FOR MODEL AND MESH GENERATION # ################################################################################ ############################################ # Method to initialize the Gmsh-Python-API # ############################################ def initializeGmsh(self,gmshConfigChanges={}): """Gmsh initialization method This method initializes the Gmsh-Python-API and adds it to the GmshModel Parameters: ----------- gmshConfigChanges: dict dictionary with Gmsh configuration options that have to be set """ gmsh.initialize('',False) # initialize Gmsh Python-API without using local .gmshrc configuration file self.updateGmshConfiguration(gmshConfigChanges) # update default configuration with user updates and set the options gmshAPI=gmsh.model # define gmshAPI as the model class of the Gmsh-Python-API (contains only static methods -> no instance required) gmshAPI.add(self.modelName) # add new model to the gmshAPI return gmshAPI # retrun gmshAPI ################################################################ # Method to set up model information and create the Gmsh model # ################################################################ def createGmshModel(self,**geometryOptions): """Method to create the Gmsh Model and provide necessary information to it This method contains the basic Gmsh model creation steps: after 
geoetric objects are defined, boolean operations are performed to generate the final geometry. Parts of the geometry are combined to physical groups in order to be able to assign, e.g., material parameters to them. If required, a periodicity constraint is finally added to the model. Parameters: ----------- geometryOptions: key-value pairs of options key-value pairs of options required for the geometry generation process """ # define geometric objects and add them to the Gmsh model self.defineGeometricObjects(**geometryOptions) # placeholder method: has to be specified/overwritten for the individual models self.addGeometricObjectsToGmshModel() # use Gmsh-API to add geometric information to the Gmsh model # define boolean operations and add them to the Gmsh model (perform them) self.defineBooleanOperations() # placeholder method: has to be specified/overwritten for the individual models self.performBooleanOperationsForGmshModel() # use Gmsh-API to perform defined operations # define physical groups and add them to the Gmsh model self.definePhysicalGroups() # placeholder method: has to be specified/overwritten for the individual models self.addPhysicalGroupsToGmshModel() # use Gmsh-API to add defined groups to the Gmsh model # set up periodicity constraints self.setupPeriodicity() # placeholder method: has to be specified/overwritten for the individual models if necessary #################################################################### # Method to calculate refinement information and generate the mesh # #################################################################### def createMesh(self,threads=None,refinementOptions={}): """Method to generate the model mesh This method contains the basic mesh generation steps for a Gmsh model: refinement fields are calculated with user-defined options and added to the model. Afterwards, a background field is specified and used for the mesh size computation within Gmsh. Finally, the mesh is created. 
Parameters: ----------- threads: int number of threads to use for the mesh generation refinementOptions: dict dictionary with user-defined options for the refinement field calculations """ if threads is not None: # set number of threads in Gmsh self.updateGmshConfiguration({"Mesh.MaxNumThreads1D": threads, "Mesh.MaxNumThreads2D": threads, "Mesh.MaxNumThreads3D": threads}) # deine refinement information and add them to the Gmsh model self.defineRefinementFields(refinementOptions=refinementOptions) # placeholder method: has to be specified/overwritten for the individual models self.addRefinementFieldsToGmshModel() # use Gmsh-API to add defined fields to the Gmsh model # set background field for meshing procedure (if possible) if not self.backgroundField is None: self.gmshAPI.mesh.field.setAsBackgroundMesh(self.backgroundField) # define background field (field which is used for mesh size determination) # generate mesh (with activate console output) self._gmshOutput(1) # activate Gmsh console output self.gmshAPI.mesh.generate(self.dimension) # generate mesh using the Gmsh-API self._gmshOutput(0) # deactivate Gmsh console output ############################################################ # Closing method to terminate the current Gmsh-API session # ############################################################ def close(self): """Gmsh finalization method The Gmsh-Python-API has to be finalized for a proper termination of the model. 
""" gmsh.finalize() ################################################################################ # MAIN METHODS FOR LOADING AND SAVING INFORMATION # ################################################################################ ######################################################## # Method to save the geometry of the model into a file # ######################################################## def saveGeometry(self,file=None): """Method to save the generated geometry into a geometry file This method allows to store geometry information into ".step" or ".brep"- files. """ # get fileparts of passed file string (return defaults if nothing is passed) fileDir,fileName,fileExt=self._getFileParts(file,"Geometry") if fileExt in SUPPORTED_GEOMETRY_FORMATS: # check if file extension is supported by gmsh gmsh.write(fileDir+"/"+fileName+fileExt) # write geometry to file (use Gmsh-internal "guess from extension" feature) else: raise ValueError("Unknown geometry file extension {}. The output geometry format must be supported by the gmsh library.".format(fileExt)) #################################################### # Method to save the mesh of the model into a file # #################################################### def saveMesh(self,file=None): """Method to save the generated mesh into a mesh file After the mesh is generated, it has to be saved into a usable file format. Here, all meshes that are supported by the meshio library, can be used to save the mesh. 
If meshio is not available, the mesh format is restricted """ # get fileparts of passed file string (return defaults if nothing is passed) fileDir,fileName,fileExt=self._getFileParts(file,"Mesh") # create mesh file depending on the chosen file extension os.makedirs(fileDir,exist_ok=True) # ensure that the file directory exists if fileExt == ".msh": # file extension is ".msh" gmsh.write(fileDir+"/"+fileName+fileExt) # -> save mesh using built-in gmsh.write method elif fileExt == '.msh2': # file extension is ".msh2" gmsh.option.setNumber("Mesh.MshFileVersion", 2.) # change format to msh2 gmsh.write(fileDir+"/"+fileName+".msh") # -> save mesh using built-in gmsh.write method elif fileExt == ".feap": # file extension is ".feap" -> write feap mesh files FeapExport(self) else: # file extension is different from ".msh" if fileExt in SUPPORTED_MESH_FORMATS: # -> check if file extension is supported by meshio with tf.TemporaryDirectory() as tmpDir: # ->-> create temporary directory tmpFile=tmpDir+"/"+self.modelName+".msh" # ->-> create temporary file gmshBinaryConfig=self.getGmshOption("Mesh.Binary") # ->-> get Gmsh configuration for binary mesh export self.setGmshOption("Mesh.Binary",1) # ->-> temporarily activate binary mesh export (reduce file size, increase speed) gmsh.write(tmpFile) # ->-> use built-in gmsh.write method to generate binary mesh in temporary folder self.setGmshOption("Mesh.Binary",gmshBinaryConfig) # ->-> reset Gmsh configuration self._convertMesh(tmpFile,fileDir+"/"+fileName+fileExt) # ->-> convert mesh to required file format else: # raise error if mesh file format is not supported by meshio raise ValueError("Unknown mesh file extension {}. 
The output mesh format must be supported by the meshio library.".format(fileExt)) ##################################################### # Method to save Gmsh model object to a binary file # ##################################################### def saveModel(self,file=None): """Method to save the complete model into a pickle object In order to be able to reuse generated models, the whole model can be saved. Within this method, the pickle module is used to save the model to a binary file. """ # get file parts of passed file string (return defaultsd if nothing is passed) fileDir,fileName,fileExt=self.__getFileParts(file,"Model") # save file os.makedirs(fileDir,exist_ok=True) # ensure that the file directory exists with open(fileDir+"/"+fileName+fileExt,"wb") as file: # open file with writing permissions in binary mode pickle.dump(self,file) # save file using pickle ################################################# # Class method to load existing model instances # ################################################# @classmethod # define method as a class method def load(cls,fileName): """Method to load an existing GmshModel object Objects that have been saved to a binary file using the pickle module can be reloaded here. 
""" with open("fileName","rb") as file: # load file return pickle.load(file) # load saved file with pickle module ################################################################################ # VISUALIZATION METHODS # ################################################################################ ########################################################## # Method to visualize the model geometry using pythonocc # ########################################################## def visualizeGeometry(self): """Method to visualize the Gmsh model geometry using pythonocc""" if PYTHONOCC_AVAILABLE: # optional pythonocc package is available GeometryVisualization(self) # -> visualize the geometry else: # optional pythonocc package is unavailable logger.warning("Geometry visualization is unavailabe due to missing packages.") # do nothing but printing a warning ############################################################ # Method to visualize the model mesh using pyvista and vtk # ############################################################ def visualizeMesh(self): """Method to visualize the generated mesh using pyvista and vtk""" MeshVisualization(self) # -> visualize the mesh #################################################################### # Method to show the whole model in Gmsh using the Gmsh-Python-API # #################################################################### def showModelInGmsh(self): """Method to open the complete model in Gmsh""" gmsh.fltk.run() ################################################################################ # PLACEHOLDER METHODS TO BE SPECIALIZED FOR THE INDIVIDUAL MODELS # ################################################################################ ################################################## # Method for the definition of geometric objects # ################################################## def defineGeometricObjects(self,**options): """Placeholder method for the definition of geometric objects. 
Has to be specified in child classes""" pass ################################################### # Method for the definition of boolean operations # ################################################### def defineBooleanOperations(self): """Placeholder method for the definition of necessary boolean Operations. Has to be specified in child classes""" pass ################################################ # Method for the definition of physical groups # ################################################ def definePhysicalGroups(self): """Placeholder method to define required physical groups. Has to be specified in child classes""" pass ########################################### # Method to define refinement information # ########################################### def defineRefinementFields(self): """Placeholder method to define/compute refinement fields for the mesh generation. Has to be specified in child classes""" pass ############################################################### # Method to set up periodicity constraints for the Gmsh model # ############################################################### def setupPeriodicity(self): """Placeholder method to set up periodicity constraints for RVEs. 
Has to be specified in child classes, if required""" pass ################################################################################ # INTERFACING METHODS TO PASS INFORMATION TO THE GMSH MODEL # ################################################################################ ############################################### # Method to transform Gmsh entity Tags to IDs # ############################################### def getIDsFromTags(self,tags): """Interfacing method to get Gmsh entity IDs from given list of Gmsh entityTags Parameters: ----------- tags: (list of) tuples list of Gmsh entity tag tuples tags=(entityDimension, entityID) """ _,IDs=map(list,zip(*tags)) # get IDs of tags-array (tag: (dimension,ID)-tuple) return IDs # return IDs ############################################################ # method to get a single Gmsh option depending on its type # ############################################################ def getGmshOption(self,optionName): """Method to get the value of a Gmsh configuratio option with known name Parameters: ----------- optionName: string name of the option """ try: # try to return option value assuming it is a string return gmsh.option.getString(optionName) # -> use built-in gmsh.option.getString method except: # option value was no string, so it must be a number return gmsh.option.getNumber(optionName) # -> use built-in gmsh.option.getNumber method ############################################################ # method to set a single Gmsh option depending on its type # ############################################################ def setGmshOption(self,optionName=None,optionValue=None): """Method to set a Gmsh configuration option Parameters: ----------- optionName: string name of the option to set optionValue: int/float/string value of the option to set """ if isinstance(optionValue,str): # option value is a string gmsh.option.setString(optionName,optionValue) # -> use built-in gmsh.option.setString method elif 
isinstance(optionValue,int) or isinstance(optionValue,float): # optionValue is a number gmsh.option.setNumber(optionName,optionValue) # -> use built-in gmsh.option.setNumber method ############################################################ # Method to add the model boundary to the physical entites # ############################################################ def getBoundaryEntities(self): """Method to get the entities on the boundary of the Gmsh model""" # get information for a physical entity containing the model boundary return self.gmshAPI.getBoundary(self.gmshAPI.getEntities(self.dimension), combined=True, oriented=False, recursive=False) ########################################### # Method to update the gmsh configuration # ########################################### def updateGmshConfiguration(self,configurationUpdate): """Method to update the Gmsh configuration options with a dictionary of updated options Parameters: ----------- configurationUpdate: dict dictionary of configuration options to be updated """ self.gmshConfigChanges.update(configurationUpdate) # update stored Gmsh configuration for optionName, optionValue in self.gmshConfigChanges.items(): # loop over all configuration settings self.setGmshOption(optionName=optionName,optionValue=optionValue) # -> activate changed configuration ######################################################### # Method to add all geometric objects to the Gmsh model # ######################################################### def addGeometricObjectsToGmshModel(self): """Method to add Gmsh representations of the gmshModels geometric objects""" for obj in self.geometricObjects: # loop over all geometric objects of the model gmshTag=obj.addToGmshModel(self.gmshAPI) # -> add a Gmsh representation of the object to the model and save the correspondig tag self.groups[obj.group].append(gmshTag) # -> add tag to the group of the geometric object ############################################################### # Method to perform 
all boolean operations for the Gmsh model # ############################################################### def performBooleanOperationsForGmshModel(self): """Method to perform defined boolean operations for the Gmsh model""" # loop over all boolean operations of the model for booleanOP in self.booleanOperations: # get details of the boolean operation to be performed operation=self._getBooleanOperation(booleanOP["operation"]) objectTags=self.groups[booleanOP["object"]] toolTags=self.groups[booleanOP["tool"]] removeObject=booleanOP["removeObject"] removeTool=booleanOP["removeTool"] resultingGroup=booleanOP["resultingGroup"] # perform boolean operation outputTags,outputTagsMap=operation(objectTags,toolTags,tag=-1,removeObject=removeObject,removeTool=removeTool) # synchronize OCC-CAD representation with model self.gmshAPI.occ.synchronize() # update groups self.groups[resultingGroup]=outputTags ############################################### # Method to add physical groups to Gmsh model # ############################################### def addPhysicalGroupsToGmshModel(self): """Method to add defined physical groups to the Gmsh model """ # loop over all physical entities of the model for physGrp in self.physicalGroups: # get details of the physical entity to add grpDim=physGrp["dimension"] # get dimension of the physical group grpName=physGrp["group"] # get the group of the physical entity (used as name) grpNumber=physGrp["physicalNumber"] # get the number defined for the physical entity (used as material number) grpEntIDs=self.getIDsFromTags(self.groups[grpName]) # find Gmsh representations of all group members and get IDs from their tags # set physical groups self.gmshAPI.addPhysicalGroup(grpDim,grpEntIDs,grpNumber) # define the entity group as physical and set correct physical number self.gmshAPI.setPhysicalName(grpDim,grpNumber,grpName) # set corresponding name of the physical group (equal to name of the group that is declared as physical for simplicity) 
################################################# # Method to add refinement fields to Gmsh model # ################################################# def addRefinementFieldsToGmshModel(self): """Method to add defined refinement fields to the Gmsh model""" # loop over all refinement fields defined for the model for refineField in self.refinementFields: # get details of the refinement field to add fieldType=refineField["fieldType"] # get the type of refinement field fieldInfos=refineField["fieldInfos"] # get information required for this type of refinement field # set refinement field fieldTag=self.gmshAPI.mesh.field.add(fieldType,tag=-1) # add new refinement field and save its number for optName, optVal in fieldInfos.items(): # provide all necessary information for this field from fieldInfo dictionary if isinstance(optVal,str): # -> current option value is a string self.gmshAPI.mesh.field.setString(fieldTag,optName,optVal) # ->-> use built-in setString method of gmsh.model.mesh.field elif isinstance(optVal,int) or isinstance(optVal,float): # -> current option value is a number self.gmshAPI.mesh.field.setNumber(fieldTag,optName,optVal) # ->-> use built-in setNumber method of gmsh.model.mesh.field elif isinstance(optVal,list) or isinstance(optVal,np.ndarray): # -> current option value is a list or numpy array self.gmshAPI.mesh.field.setNumbers(fieldTag,optName,optVal) # ->-> use built-in setNumbers method of gmsh.model.mesh.field ################################################################################ # INTERFACING METHODS TO ADD GEOMETRIC OBJECTS TO THE MODEL # ################################################################################ ############################################################## # Method to add a single geometric object to the Gmsh model # ############################################################## def addGeometricObject(self,objClassString,**objData): """Method to add one of the objects that are defined within the class 
geometricObjects and its child classes to the Gmsh model. Parameters: ----------- objClass: class class the geometric object is defined in objData: keyworded object data enumeration of keyworded arguments needed for the creation of the new geometric object of class objectClass """ objClass=self._getGeometricObjectClass(objClassString) objInstance=objClass(**objData) objGroup=objInstance.group self.geometricObjects.append(objInstance) self.groups.update({objGroup: []}) if objGroup not in self.groups else self.groups ################################################################################ # PRIVATE/HIDDEN METHODS FOR INTERNAL USE ONLY # ################################################################################ ################################################ # Method to check file string for saving files # ################################################ def _getFileParts(self,fileString,whatToSave): """Internal method to get the file directory, name and extension from a fileString Parameters: ----------- fileString: string string to analyze whatToSave: string flag that indicates whether "geometry", "mesh" or "model" have to be saved """ # check if no fileString was passed if fileString is None: # no file string passed fileString="" # -> set empty file string # process passed file string fileDir,fileName= os.path.split(fileString) # get file directory and name (with extension) fileName,fileExt=os.path.splitext(fileName) # split file name into name and extension if fileDir == "": # check if file directory is empty fileDir="." 
# -> set default value (current directory) if fileName == "": # check if file name is empty fileName=self.modelName # use unique model name as file name if fileExt == "": # check if file extension is empty fileExt=self._defaultFileExts[whatToSave] # -> set default value (.gmshModel) # return file parts return fileDir, fileName, fileExt ######################################## # Method to toggle Gmsh console output # ######################################## def _gmshOutput(self,switch): """Method to enable/disable Gmsh console output""" self.setGmshOption(optionName="General.Terminal",optionValue=switch) ############################################################################ # Method to return the correct boolean operation from the operation string # ############################################################################ def _getBooleanOperation(self,operation): """Internal method to return the correct boolean operation function from an operation string Parameters: ----------- operation: string boolean operation to be performed """ if operation == "cut": # operation to be performed is "cut" return self.gmshAPI.occ.cut # -> use built-in gmsh.model.occ.cut method elif operation == "fuse": # operation to be performed is "fuse" return self.gmshAPI.occ.fuse # -> use built-in gmsh.model.occ.fuse method elif operation == "fragment": # operation to be performed is "fragment" return self.gmshAPI.occ.fragment # -> use built-in gmsh.model.occ.fragment method elif operation == "intersect": # operation to be performed is "intersect" return self.gmshAPI.occ.intersect # -> use built-in gmsh.model.occ.intersect method else: # operation to be performed is something different raise ValueError("Unknown boolean operation {}".format(operation)) # -> raise error that type of boolean operation is not known ######################################################### # Method to get defined geometric objects from a string # ######################################################### 
def _getGeometricObjectClass(self,objString): """Internal method to return the correct geometric object class from an object class string Parameters: ----------- objString: string required geometric object class (as a string) """ for objKey, objClass in inspect.getmembers(geomObj,inspect.isclass): # get all classes from the geometricObjects file if objKey == objString: # check if class key matches the object string that was passed return objClass # return class ########################################################### # Method to calculate the overall Gmsh model bounding box # ########################################################### def _getGmshModelBoundingBox(self): """Internal method to get the overall Gmsh model bounding box""" entityBBoxes=np.atleast_2d([self.gmshAPI.getBoundingBox(*entTag) for entTag in self.gmshAPI.getEntities()]) modelBBox= np.array([[*np.amin(entityBBoxes[:,0:3],axis=0)], [*np.amax(entityBBoxes[:,3:6],axis=0)]]) return modelBBox ########################################################## # Method to convert meshes into meshio-supported formats # ########################################################## def _convertMesh(self,inFile,outFile): """Internal method to convert meshes between different file formats using meshio Parameters: ----------- inFile: string file string (directory/name.extension) for the input mesh file outFile: string file string (directory/name.extension) for the output mesh file """ mesh=meshio.Mesh.read(inFile) # read file of mesh to convert with meshio (get mesh format from file extension) mesh.write(outFile) # write file for converted mesh (get mesh format from file extension)
################################################################################ # CLASS DEFINITION FOR MESHING MODELS GENERATED USING THE GMSH-PYTHON-API # ################################################################################ # Within this file, the generic Model class is defined. It is the base class # for other, more specific classes which aim to mesh models using the Gmsh- # Python-API. In addition to the methods defined within the Gmsh-Python-API, # this class provides methods for all basic steps of a model generation using # Gmsh: some of these methods are only placeholders here and - if required - # have to be specified/overwritten for the more specialized models. ########################### # Load required libraries # ########################### # Standard Python libraries import os # os for file handling (split extensions from file) import inspect # inspect to search for classes in modules import datetime as dt # datetime for time stamps import copy as cp # copy for deepcopies of arrays import tempfile as tf # tempfile for generation of temprory files and folders import numpy as np # numpy for array computations import pickle # pickle for saving and loading of gmshModels import logging # logging for log messages logger=logging.getLogger(__name__) # set logger # additional program libraries import gmsh # Gmsh Python-API import meshio # meshio for mesh file format conversions # self-defined class definitions and modules from ..Geometry import GeometricObjects as geomObj # classes for implemented geometric objects from ..Visualization.GeometryVisualization import GeometryVisualization, PYTHONOCC_AVAILABLE # class for geometry visualization from ..Visualization.MeshVisualization import MeshVisualization # class for mesh visualization from ..MeshExport import FeapExport ############################# # Set configuration options # ############################# SUPPORTED_GEOMETRY_FORMATS=[".brep", ".stp", ".step"] # set supported geometry formats 
SUPPORTED_MESH_FORMATS=list(meshio.extension_to_filetype.keys()) # set supported mesh file formats ############################# # Define GenericModel class # ############################# class GenericModel: """Generic class for meshing models generated using the Gmsh-Python-API This class provides the basic mesh generation framework for Gmsh. It implements the methods for: (1) Setting up a geometry using basic geometric entities and boolean operations (2) Adding the geometric objects to Gmsh, performing the boolean operations, defining physical groups (3) creating a mesh with user-defined refinement fields (4) saving and visualizing the mesh Some of the methods used within the framework are only defined as placeholder methods here and have to be specified in detail within the child classes. Attributes: ----------- dimension: int dimension of the model instance modelName: string name of the Gmsh model and default name for all resulting files gmshConfigChanges: dict dictionary for user updates of the default Gmsh configuration geometricObjects: list list containing the instances of geometric objects used for the model geometry creation groups: dict dictionary with group information for the model entities booleanOperations: list list with dictionaries defining the individual boolean operations to perform for the model generation physicalGroups: list list with dictionary defining which Gmsh entities are defined as physical groups (e.g. 
different materials) refinementFields: list list of dictionaries defining the refinement fields that have to be added to the Gmsh model backgroundField: int number of the field that has to be used as the background field/mesh for the mesh generation """ ######################### # Initialization method # ######################### def __init__(self,dimension=None,gmshConfigChanges={}): """Initialization method of a generic GmshModel object Parameters: ----------- dimension: int dimension of the model instance gmshConfigChanges: dict dictionary for user updates of the default Gmsh configuration """ # set unique model name self.modelName="Model_"+dt.datetime.now().strftime("%Y%m%d_%H%M%S%f") # use time stamp of initialization (with microseconds) for unique model name # set default file extensions depending on the savin method self._defaultFileExts={ "Geometry": ".brep", # use ".brep" as default extension for saving the geometry "Mesh": ".msh", # use ".msh" as default extension for saving meshes "Model": ".gmshModel", # use ".gmshModel" as default extension for saving models "Misc": ".txt" # use ".txt" as default extension for miscellaneous information } # initialize Gmsh-Python-API self.gmshConfigChanges={ # default Gmsh configuration changes "General.Terminal": 0, # deactivate console output by default (only activated for mesh generation) "Geometry.OCCBoundsUseStl": 1, # use more accurate computation of bounding boxes (slower but advantegous for periodicity constraints) "Geometry.Tolerance": 1e-12 # adjust geometric tolerance to be a little more precise then default (1e-8) } self.gmshAPI=self.initializeGmsh(gmshConfigChanges) # this assignment facilitates the usage of all methods provided by the gmsh.model class # initialize attributes that all instances of GenericModel should have self.dimension=dimension # set (highest) dimension of the model self.geometricObjects=[] # initialze empty list of geomtric objects (used for model generation) self.groups={} # initialize 
empty dictionary of groups (used for boolean operations adn physical groups) self.booleanOperations=[] # initialize empty list of defined boolean operations (used to generate the model from basic geometrical objects) self.physicalGroups=[] # initialize empty list of defined physical groups (used to identify materials and boundaries within the mesh) self.refinementFields=[] # initialize empty list of refinement fields (used to control the mesh sizes) self.backgroundField=None # initialize background field for the meshing algorithm (used to control the mesh sizes) ################################################################################ # MAIN METHODS FOR MODEL AND MESH GENERATION # ################################################################################ ############################################ # Method to initialize the Gmsh-Python-API # ############################################ def initializeGmsh(self,gmshConfigChanges={}): """Gmsh initialization method This method initializes the Gmsh-Python-API and adds it to the GmshModel Parameters: ----------- gmshConfigChanges: dict dictionary with Gmsh configuration options that have to be set """ gmsh.initialize('',False) # initialize Gmsh Python-API without using local .gmshrc configuration file self.updateGmshConfiguration(gmshConfigChanges) # update default configuration with user updates and set the options gmshAPI=gmsh.model # define gmshAPI as the model class of the Gmsh-Python-API (contains only static methods -> no instance required) gmshAPI.add(self.modelName) # add new model to the gmshAPI return gmshAPI # retrun gmshAPI ################################################################ # Method to set up model information and create the Gmsh model # ################################################################ def createGmshModel(self,**geometryOptions): """Method to create the Gmsh Model and provide necessary information to it This method contains the basic Gmsh model creation steps: after 
geoetric objects are defined, boolean operations are performed to generate the final geometry. Parts of the geometry are combined to physical groups in order to be able to assign, e.g., material parameters to them. If required, a periodicity constraint is finally added to the model. Parameters: ----------- geometryOptions: key-value pairs of options key-value pairs of options required for the geometry generation process """ # define geometric objects and add them to the Gmsh model self.defineGeometricObjects(**geometryOptions) # placeholder method: has to be specified/overwritten for the individual models self.addGeometricObjectsToGmshModel() # use Gmsh-API to add geometric information to the Gmsh model # define boolean operations and add them to the Gmsh model (perform them) self.defineBooleanOperations() # placeholder method: has to be specified/overwritten for the individual models self.performBooleanOperationsForGmshModel() # use Gmsh-API to perform defined operations # define physical groups and add them to the Gmsh model self.definePhysicalGroups() # placeholder method: has to be specified/overwritten for the individual models self.addPhysicalGroupsToGmshModel() # use Gmsh-API to add defined groups to the Gmsh model # set up periodicity constraints self.setupPeriodicity() # placeholder method: has to be specified/overwritten for the individual models if necessary #################################################################### # Method to calculate refinement information and generate the mesh # #################################################################### def createMesh(self,threads=None,refinementOptions={}): """Method to generate the model mesh This method contains the basic mesh generation steps for a Gmsh model: refinement fields are calculated with user-defined options and added to the model. Afterwards, a background field is specified and used for the mesh size computation within Gmsh. Finally, the mesh is created. 
Parameters: ----------- threads: int number of threads to use for the mesh generation refinementOptions: dict dictionary with user-defined options for the refinement field calculations """ if threads is not None: # set number of threads in Gmsh self.updateGmshConfiguration({"Mesh.MaxNumThreads1D": threads, "Mesh.MaxNumThreads2D": threads, "Mesh.MaxNumThreads3D": threads}) # deine refinement information and add them to the Gmsh model self.defineRefinementFields(refinementOptions=refinementOptions) # placeholder method: has to be specified/overwritten for the individual models self.addRefinementFieldsToGmshModel() # use Gmsh-API to add defined fields to the Gmsh model # set background field for meshing procedure (if possible) if not self.backgroundField is None: self.gmshAPI.mesh.field.setAsBackgroundMesh(self.backgroundField) # define background field (field which is used for mesh size determination) # generate mesh (with activate console output) self._gmshOutput(1) # activate Gmsh console output self.gmshAPI.mesh.generate(self.dimension) # generate mesh using the Gmsh-API self._gmshOutput(0) # deactivate Gmsh console output ############################################################ # Closing method to terminate the current Gmsh-API session # ############################################################ def close(self): """Gmsh finalization method The Gmsh-Python-API has to be finalized for a proper termination of the model. 
""" gmsh.finalize() ################################################################################ # MAIN METHODS FOR LOADING AND SAVING INFORMATION # ################################################################################ ######################################################## # Method to save the geometry of the model into a file # ######################################################## def saveGeometry(self,file=None): """Method to save the generated geometry into a geometry file This method allows to store geometry information into ".step" or ".brep"- files. """ # get fileparts of passed file string (return defaults if nothing is passed) fileDir,fileName,fileExt=self._getFileParts(file,"Geometry") if fileExt in SUPPORTED_GEOMETRY_FORMATS: # check if file extension is supported by gmsh gmsh.write(fileDir+"/"+fileName+fileExt) # write geometry to file (use Gmsh-internal "guess from extension" feature) else: raise ValueError("Unknown geometry file extension {}. The output geometry format must be supported by the gmsh library.".format(fileExt)) #################################################### # Method to save the mesh of the model into a file # #################################################### def saveMesh(self,file=None): """Method to save the generated mesh into a mesh file After the mesh is generated, it has to be saved into a usable file format. Here, all meshes that are supported by the meshio library, can be used to save the mesh. 
def saveMesh(self, file=None):
    """Method to save the generated mesh into a mesh file.

    After the mesh is generated, it has to be saved into a usable file
    format. Here, all mesh formats that are supported by the meshio library
    can be used to save the mesh. If meshio is not available, the mesh
    format is restricted.

    Parameters:
    -----------
    file: string
        file string (directory/name.extension) for the mesh file;
        defaults are used for missing parts
    """
    # get file parts of the passed file string (defaults are returned if nothing is passed)
    fileDir, fileName, fileExt = self._getFileParts(file, "Mesh")
    os.makedirs(fileDir, exist_ok=True)                                         # ensure that the file directory exists

    # create the mesh file depending on the chosen file extension
    if fileExt == ".msh":                                                       # native Gmsh format
        gmsh.write(fileDir + "/" + fileName + fileExt)                          # -> save mesh using the built-in gmsh.write method
    elif fileExt == ".msh2":                                                    # legacy Gmsh format (version 2)
        gmsh.option.setNumber("Mesh.MshFileVersion", 2.)                        # -> switch the output format to msh2
        gmsh.write(fileDir + "/" + fileName + ".msh")                           # -> save mesh using the built-in gmsh.write method
    elif fileExt == ".feap":                                                    # FEAP format
        FeapExport(self)                                                        # -> write FEAP mesh files
    elif fileExt in SUPPORTED_MESH_FORMATS:                                     # any other format supported by meshio
        with tf.TemporaryDirectory() as tmpDir:                                 # -> create a temporary directory
            tmpFile = tmpDir + "/" + self.modelName + ".msh"                    # -> path of a temporary mesh file
            gmshBinaryConfig = self.getGmshOption("Mesh.Binary")                # -> store the current Gmsh binary-export setting
            self.setGmshOption("Mesh.Binary", 1)                                # -> temporarily activate binary export (smaller files, faster)
            gmsh.write(tmpFile)                                                 # -> write the binary mesh into the temporary folder
            self.setGmshOption("Mesh.Binary", gmshBinaryConfig)                 # -> restore the original Gmsh configuration
            self._convertMesh(tmpFile, fileDir + "/" + fileName + fileExt)      # -> convert the mesh to the requested file format
    else:                                                                       # unsupported mesh file format
        raise ValueError("Unknown mesh file extension {}. The output mesh format must be supported by the meshio library.".format(fileExt))


def saveModel(self, file=None):
    """Method to save the complete model into a pickle file.

    In order to be able to reuse generated models, the whole model can be
    saved: the pickle module is used to store the model in a binary file.

    Parameters:
    -----------
    file: string
        file string (directory/name.extension) for the model file;
        defaults are used for missing parts
    """
    # get file parts of the passed file string (defaults are returned if nothing is passed)
    # BUGFIX: was self.__getFileParts -> double-underscore name mangling raised an
    # AttributeError, since the method is defined as _getFileParts (single underscore)
    fileDir, fileName, fileExt = self._getFileParts(file, "Model")
    os.makedirs(fileDir, exist_ok=True)                                         # ensure that the file directory exists
    with open(fileDir + "/" + fileName + fileExt, "wb") as modelFile:           # open the file for writing in binary mode
        pickle.dump(self, modelFile)                                            # save the model using pickle


@classmethod
def load(cls, fileName):
    """Method to load an existing GmshModel object.

    Objects that have been saved to a binary file using the pickle module
    can be reloaded here.

    Parameters:
    -----------
    fileName: string
        file string (directory/name.extension) of the saved model
    """
    # BUGFIX: was open("fileName","rb") -> opened the literal file "fileName"
    # instead of the file the caller asked for
    with open(fileName, "rb") as modelFile:                                     # open the saved model file
        return pickle.load(modelFile)                                           # restore the model with pickle


################################################################################
#                             VISUALIZATION METHODS                            #
################################################################################

def visualizeGeometry(self):
    """Method to visualize the Gmsh model geometry using pythonocc."""
    if PYTHONOCC_AVAILABLE:                                                     # optional pythonocc package is available
        GeometryVisualization(self)                                             # -> visualize the geometry
    else:                                                                       # optional pythonocc package is unavailable
        # BUGFIX: fixed typo "unavailabe" in the user-facing warning
        logger.warning("Geometry visualization is unavailable due to missing packages.")


def visualizeMesh(self):
    """Method to visualize the generated mesh using pyvista and vtk."""
    MeshVisualization(self)                                                     # -> visualize the mesh


def showModelInGmsh(self):
    """Method to open the complete model in the Gmsh GUI."""
    gmsh.fltk.run()


################################################################################
#        PLACEHOLDER METHODS TO BE SPECIALIZED FOR THE INDIVIDUAL MODELS       #
################################################################################

def defineGeometricObjects(self, **options):
    """Placeholder method for the definition of geometric objects.
    Has to be specified in child classes."""
    pass


def defineBooleanOperations(self):
    """Placeholder method for the definition of necessary boolean operations.
    Has to be specified in child classes."""
    pass


def definePhysicalGroups(self):
    """Placeholder method to define required physical groups.
    Has to be specified in child classes."""
    pass


def defineRefinementFields(self):
    """Placeholder method to define/compute refinement fields for the mesh
    generation. Has to be specified in child classes."""
    pass


def setupPeriodicity(self):
    """Placeholder method to set up periodicity constraints for RVEs.
    Has to be specified in child classes, if required."""
    pass


################################################################################
#           INTERFACING METHODS TO PASS INFORMATION TO THE GMSH MODEL          #
################################################################################

def getIDsFromTags(self, tags):
    """Interfacing method to get Gmsh entity IDs from a list of Gmsh entity tags.

    Parameters:
    -----------
    tags: (list of) tuples
        list of Gmsh entity tag tuples tags=(entityDimension, entityID)
    """
    if not tags:                                                                # ROBUSTNESS: an empty tag list now yields an empty ID list
        return []                                                               # (unpacking zip(*[]) would raise a ValueError otherwise)
    _, IDs = map(list, zip(*tags))                                              # unzip the (dimension, ID) tuples
    return IDs                                                                  # return the IDs only


def getGmshOption(self, optionName):
    """Method to get the value of a Gmsh configuration option with known name.

    Parameters:
    -----------
    optionName: string
        name of the option
    """
    try:                                                                        # try to return the option value assuming it is a string
        return gmsh.option.getString(optionName)                                # -> use the built-in gmsh.option.getString method
    except Exception:                                                           # option value was no string, so it must be a number
        return gmsh.option.getNumber(optionName)                                # -> use the built-in gmsh.option.getNumber method
        # (narrowed from a bare "except:" so e.g. KeyboardInterrupt is not swallowed)


def setGmshOption(self, optionName=None, optionValue=None):
    """Method to set a Gmsh configuration option.

    Parameters:
    -----------
    optionName: string
        name of the option to set
    optionValue: int/float/string
        value of the option to set
    """
    if isinstance(optionValue, str):                                            # option value is a string
        gmsh.option.setString(optionName, optionValue)                          # -> use the built-in gmsh.option.setString method
    elif isinstance(optionValue, (int, float)):                                 # option value is a number
        gmsh.option.setNumber(optionName, optionValue)                          # -> use the built-in gmsh.option.setNumber method


def getBoundaryEntities(self):
    """Method to get the entities on the boundary of the Gmsh model."""
    # get the combined (but unoriented) boundary of all entities of the model dimension
    return self.gmshAPI.getBoundary(self.gmshAPI.getEntities(self.dimension), combined=True, oriented=False, recursive=False)


def updateGmshConfiguration(self, configurationUpdate):
    """Method to update the Gmsh configuration options with a dictionary of
    updated options.

    Parameters:
    -----------
    configurationUpdate: dict
        dictionary of configuration options to be updated
    """
    self.gmshConfigChanges.update(configurationUpdate)                          # update the stored Gmsh configuration
    for optionName, optionValue in self.gmshConfigChanges.items():              # loop over all configuration settings
        self.setGmshOption(optionName=optionName, optionValue=optionValue)      # -> activate the changed configuration


def addGeometricObjectsToGmshModel(self):
    """Method to add Gmsh representations of the model's geometric objects."""
    for obj in self.geometricObjects:                                           # loop over all geometric objects of the model
        gmshTag = obj.addToGmshModel(self.gmshAPI)                              # -> add a Gmsh representation and store the corresponding tag
        self.groups[obj.group].append(gmshTag)                                  # -> append the tag to the group of the geometric object


def performBooleanOperationsForGmshModel(self):
    """Method to perform the defined boolean operations for the Gmsh model."""
    for booleanOP in self.booleanOperations:                                    # loop over all boolean operations of the model
        # get details of the boolean operation to be performed
        operation = self._getBooleanOperation(booleanOP["operation"])           # -> resolve the Gmsh OCC function to call
        objectTags = self.groups[booleanOP["object"]]
        toolTags = self.groups[booleanOP["tool"]]
        # perform the boolean operation
        outputTags, outputTagsMap = operation(objectTags, toolTags, tag=-1,
                                              removeObject=booleanOP["removeObject"],
                                              removeTool=booleanOP["removeTool"])
        self.gmshAPI.occ.synchronize()                                          # synchronize the OCC-CAD representation with the model
        self.groups[booleanOP["resultingGroup"]] = outputTags                   # update the groups with the operation result


def addPhysicalGroupsToGmshModel(self):
    """Method to add the defined physical groups to the Gmsh model."""
    for physGrp in self.physicalGroups:                                         # loop over all physical entities of the model
        grpDim = physGrp["dimension"]                                           # -> dimension of the physical group
        grpName = physGrp["group"]                                              # -> group of the physical entity (used as name)
        grpNumber = physGrp["physicalNumber"]                                   # -> number defined for the entity (used as material number)
        grpEntIDs = self.getIDsFromTags(self.groups[grpName])                   # -> IDs of the Gmsh representations of all group members
        self.gmshAPI.addPhysicalGroup(grpDim, grpEntIDs, grpNumber)             # declare the entity group as physical with the correct number
        self.gmshAPI.setPhysicalName(grpDim, grpNumber, grpName)                # name the physical group after the underlying group
################################################# # Method to add refinement fields to Gmsh model # ################################################# def addRefinementFieldsToGmshModel(self): """Method to add defined refinement fields to the Gmsh model""" # loop over all refinement fields defined for the model for refineField in self.refinementFields: # get details of the refinement field to add fieldType=refineField["fieldType"] # get the type of refinement field fieldInfos=refineField["fieldInfos"] # get information required for this type of refinement field # set refinement field fieldTag=self.gmshAPI.mesh.field.add(fieldType,tag=-1) # add new refinement field and save its number for optName, optVal in fieldInfos.items(): # provide all necessary information for this field from fieldInfo dictionary if isinstance(optVal,str): # -> current option value is a string self.gmshAPI.mesh.field.setString(fieldTag,optName,optVal) # ->-> use built-in setString method of gmsh.model.mesh.field elif isinstance(optVal,int) or isinstance(optVal,float): # -> current option value is a number self.gmshAPI.mesh.field.setNumber(fieldTag,optName,optVal) # ->-> use built-in setNumber method of gmsh.model.mesh.field elif isinstance(optVal,list) or isinstance(optVal,np.ndarray): # -> current option value is a list or numpy array self.gmshAPI.mesh.field.setNumbers(fieldTag,optName,optVal) # ->-> use built-in setNumbers method of gmsh.model.mesh.field ################################################################################ # INTERFACING METHODS TO ADD GEOMETRIC OBJECTS TO THE MODEL # ################################################################################ ############################################################## # Method to add a single geometric object to the Gmsh model # ############################################################## def addGeometricObject(self,objClassString,**objData): """Method to add one of the objects that are defined within the class 
geometricObjects and its child classes to the Gmsh model. Parameters: ----------- objClass: class class the geometric object is defined in objData: keyworded object data enumeration of keyworded arguments needed for the creation of the new geometric object of class objectClass """ objClass=self._getGeometricObjectClass(objClassString) objInstance=objClass(**objData) objGroup=objInstance.group self.geometricObjects.append(objInstance) self.groups.update({objGroup: []}) if objGroup not in self.groups else self.groups ################################################################################ # PRIVATE/HIDDEN METHODS FOR INTERNAL USE ONLY # ################################################################################ ################################################ # Method to check file string for saving files # ################################################ def _getFileParts(self,fileString,whatToSave): """Internal method to get the file directory, name and extension from a fileString Parameters: ----------- fileString: string string to analyze whatToSave: string flag that indicates whether "geometry", "mesh" or "model" have to be saved """ # check if no fileString was passed if fileString is None: # no file string passed fileString="" # -> set empty file string # process passed file string fileDir,fileName= os.path.split(fileString) # get file directory and name (with extension) fileName,fileExt=os.path.splitext(fileName) # split file name into name and extension if fileDir == "": # check if file directory is empty fileDir="." 
# -> set default value (current directory) if fileName == "": # check if file name is empty fileName=self.modelName # use unique model name as file name if fileExt == "": # check if file extension is empty fileExt=self._defaultFileExts[whatToSave] # -> set default value (.gmshModel) # return file parts return fileDir, fileName, fileExt ######################################## # Method to toggle Gmsh console output # ######################################## def _gmshOutput(self,switch): """Method to enable/disable Gmsh console output""" self.setGmshOption(optionName="General.Terminal",optionValue=switch) ############################################################################ # Method to return the correct boolean operation from the operation string # ############################################################################ def _getBooleanOperation(self,operation): """Internal method to return the correct boolean operation function from an operation string Parameters: ----------- operation: string boolean operation to be performed """ if operation == "cut": # operation to be performed is "cut" return self.gmshAPI.occ.cut # -> use built-in gmsh.model.occ.cut method elif operation == "fuse": # operation to be performed is "fuse" return self.gmshAPI.occ.fuse # -> use built-in gmsh.model.occ.fuse method elif operation == "fragment": # operation to be performed is "fragment" return self.gmshAPI.occ.fragment # -> use built-in gmsh.model.occ.fragment method elif operation == "intersect": # operation to be performed is "intersect" return self.gmshAPI.occ.intersect # -> use built-in gmsh.model.occ.intersect method else: # operation to be performed is something different raise ValueError("Unknown boolean operation {}".format(operation)) # -> raise error that type of boolean operation is not known ######################################################### # Method to get defined geometric objects from a string # ######################################################### 
def _getGeometricObjectClass(self, objString):
    """Internal method to return the geometric object class for a
    class-name string.

    Parameters:
    -----------
    objString: string
        name of the required geometric object class

    Raises:
    -------
    ValueError
        if no class of the given name exists in the geometricObjects module
    """
    for objKey, objClass in inspect.getmembers(geomObj, inspect.isclass):       # get all classes defined in the geometricObjects module
        if objKey == objString:                                                 # -> class name matches the requested string
            return objClass                                                     # -> return the class
    # ROBUSTNESS: fail loudly instead of implicitly returning None, which would
    # otherwise surface later as a confusing "'NoneType' object is not callable"
    raise ValueError("Unknown geometric object class {}".format(objString))


def _getGmshModelBoundingBox(self):
    """Internal method to compute the overall Gmsh model bounding box."""
    # collect the bounding boxes of all individual model entities (one row each)
    entityBBoxes = np.atleast_2d([self.gmshAPI.getBoundingBox(*entTag) for entTag in self.gmshAPI.getEntities()])
    # overall box: componentwise minimum of the lower corners and maximum of the upper corners
    modelBBox = np.array([[*np.amin(entityBBoxes[:, 0:3], axis=0)],
                          [*np.amax(entityBBoxes[:, 3:6], axis=0)]])
    return modelBBox


def _convertMesh(self, inFile, outFile):
    """Internal method to convert meshes between different file formats
    using meshio.

    Parameters:
    -----------
    inFile: string
        file string (directory/name.extension) for the input mesh file
    outFile: string
        file string (directory/name.extension) for the output mesh file
    """
    # BUGFIX: meshio exposes a module-level read() function; meshio.Mesh has no
    # read() classmethod, so the original "meshio.Mesh.read(inFile)" raised an
    # AttributeError
    mesh = meshio.read(inFile)                                                  # read the input mesh (format deduced from the file extension)
    mesh.write(outFile)                                                         # write the converted mesh (format deduced from the file extension)
en
0.478093
################################################################################ # CLASS DEFINITION FOR MESHING MODELS GENERATED USING THE GMSH-PYTHON-API # ################################################################################ # Within this file, the generic Model class is defined. It is the base class # for other, more specific classes which aim to mesh models using the Gmsh- # Python-API. In addition to the methods defined within the Gmsh-Python-API, # this class provides methods for all basic steps of a model generation using # Gmsh: some of these methods are only placeholders here and - if required - # have to be specified/overwritten for the more specialized models. ########################### # Load required libraries # ########################### # Standard Python libraries # os for file handling (split extensions from file) # inspect to search for classes in modules # datetime for time stamps # copy for deepcopies of arrays # tempfile for generation of temprory files and folders # numpy for array computations # pickle for saving and loading of gmshModels # logging for log messages # set logger # additional program libraries # Gmsh Python-API # meshio for mesh file format conversions # self-defined class definitions and modules # classes for implemented geometric objects # class for geometry visualization # class for mesh visualization ############################# # Set configuration options # ############################# # set supported geometry formats # set supported mesh file formats ############################# # Define GenericModel class # ############################# Generic class for meshing models generated using the Gmsh-Python-API This class provides the basic mesh generation framework for Gmsh. 
It implements the methods for: (1) Setting up a geometry using basic geometric entities and boolean operations (2) Adding the geometric objects to Gmsh, performing the boolean operations, defining physical groups (3) creating a mesh with user-defined refinement fields (4) saving and visualizing the mesh Some of the methods used within the framework are only defined as placeholder methods here and have to be specified in detail within the child classes. Attributes: ----------- dimension: int dimension of the model instance modelName: string name of the Gmsh model and default name for all resulting files gmshConfigChanges: dict dictionary for user updates of the default Gmsh configuration geometricObjects: list list containing the instances of geometric objects used for the model geometry creation groups: dict dictionary with group information for the model entities booleanOperations: list list with dictionaries defining the individual boolean operations to perform for the model generation physicalGroups: list list with dictionary defining which Gmsh entities are defined as physical groups (e.g. 
different materials) refinementFields: list list of dictionaries defining the refinement fields that have to be added to the Gmsh model backgroundField: int number of the field that has to be used as the background field/mesh for the mesh generation ######################### # Initialization method # ######################### Initialization method of a generic GmshModel object Parameters: ----------- dimension: int dimension of the model instance gmshConfigChanges: dict dictionary for user updates of the default Gmsh configuration # set unique model name # use time stamp of initialization (with microseconds) for unique model name # set default file extensions depending on the savin method # use ".brep" as default extension for saving the geometry # use ".msh" as default extension for saving meshes # use ".gmshModel" as default extension for saving models # use ".txt" as default extension for miscellaneous information # initialize Gmsh-Python-API # default Gmsh configuration changes # deactivate console output by default (only activated for mesh generation) # use more accurate computation of bounding boxes (slower but advantegous for periodicity constraints) # adjust geometric tolerance to be a little more precise then default (1e-8) # this assignment facilitates the usage of all methods provided by the gmsh.model class # initialize attributes that all instances of GenericModel should have # set (highest) dimension of the model # initialze empty list of geomtric objects (used for model generation) # initialize empty dictionary of groups (used for boolean operations adn physical groups) # initialize empty list of defined boolean operations (used to generate the model from basic geometrical objects) # initialize empty list of defined physical groups (used to identify materials and boundaries within the mesh) # initialize empty list of refinement fields (used to control the mesh sizes) # initialize background field for the meshing algorithm (used to control the mesh 
sizes) ################################################################################ # MAIN METHODS FOR MODEL AND MESH GENERATION # ################################################################################ ############################################ # Method to initialize the Gmsh-Python-API # ############################################ Gmsh initialization method This method initializes the Gmsh-Python-API and adds it to the GmshModel Parameters: ----------- gmshConfigChanges: dict dictionary with Gmsh configuration options that have to be set # initialize Gmsh Python-API without using local .gmshrc configuration file # update default configuration with user updates and set the options # define gmshAPI as the model class of the Gmsh-Python-API (contains only static methods -> no instance required) # add new model to the gmshAPI # retrun gmshAPI ################################################################ # Method to set up model information and create the Gmsh model # ################################################################ Method to create the Gmsh Model and provide necessary information to it This method contains the basic Gmsh model creation steps: after geoetric objects are defined, boolean operations are performed to generate the final geometry. Parts of the geometry are combined to physical groups in order to be able to assign, e.g., material parameters to them. If required, a periodicity constraint is finally added to the model. 
Parameters: ----------- geometryOptions: key-value pairs of options key-value pairs of options required for the geometry generation process # define geometric objects and add them to the Gmsh model # placeholder method: has to be specified/overwritten for the individual models # use Gmsh-API to add geometric information to the Gmsh model # define boolean operations and add them to the Gmsh model (perform them) # placeholder method: has to be specified/overwritten for the individual models # use Gmsh-API to perform defined operations # define physical groups and add them to the Gmsh model # placeholder method: has to be specified/overwritten for the individual models # use Gmsh-API to add defined groups to the Gmsh model # set up periodicity constraints # placeholder method: has to be specified/overwritten for the individual models if necessary #################################################################### # Method to calculate refinement information and generate the mesh # #################################################################### Method to generate the model mesh This method contains the basic mesh generation steps for a Gmsh model: refinement fields are calculated with user-defined options and added to the model. Afterwards, a background field is specified and used for the mesh size computation within Gmsh. Finally, the mesh is created. 
Parameters: ----------- threads: int number of threads to use for the mesh generation refinementOptions: dict dictionary with user-defined options for the refinement field calculations # set number of threads in Gmsh # deine refinement information and add them to the Gmsh model # placeholder method: has to be specified/overwritten for the individual models # use Gmsh-API to add defined fields to the Gmsh model # set background field for meshing procedure (if possible) # define background field (field which is used for mesh size determination) # generate mesh (with activate console output) # activate Gmsh console output # generate mesh using the Gmsh-API # deactivate Gmsh console output ############################################################ # Closing method to terminate the current Gmsh-API session # ############################################################ Gmsh finalization method The Gmsh-Python-API has to be finalized for a proper termination of the model. ################################################################################ # MAIN METHODS FOR LOADING AND SAVING INFORMATION # ################################################################################ ######################################################## # Method to save the geometry of the model into a file # ######################################################## Method to save the generated geometry into a geometry file This method allows to store geometry information into ".step" or ".brep"- files. # get fileparts of passed file string (return defaults if nothing is passed) # check if file extension is supported by gmsh # write geometry to file (use Gmsh-internal "guess from extension" feature) #################################################### # Method to save the mesh of the model into a file # #################################################### Method to save the generated mesh into a mesh file After the mesh is generated, it has to be saved into a usable file format. 
Here, all meshes that are supported by the meshio library, can be used to save the mesh. If meshio is not available, the mesh format is restricted # get fileparts of passed file string (return defaults if nothing is passed) # create mesh file depending on the chosen file extension # ensure that the file directory exists # file extension is ".msh" # -> save mesh using built-in gmsh.write method # file extension is ".msh2" # change format to msh2 # -> save mesh using built-in gmsh.write method # file extension is ".feap" -> write feap mesh files # file extension is different from ".msh" # -> check if file extension is supported by meshio # ->-> create temporary directory # ->-> create temporary file # ->-> get Gmsh configuration for binary mesh export # ->-> temporarily activate binary mesh export (reduce file size, increase speed) # ->-> use built-in gmsh.write method to generate binary mesh in temporary folder # ->-> reset Gmsh configuration # ->-> convert mesh to required file format # raise error if mesh file format is not supported by meshio ##################################################### # Method to save Gmsh model object to a binary file # ##################################################### Method to save the complete model into a pickle object In order to be able to reuse generated models, the whole model can be saved. Within this method, the pickle module is used to save the model to a binary file. # get file parts of passed file string (return defaultsd if nothing is passed) # save file # ensure that the file directory exists # open file with writing permissions in binary mode # save file using pickle ################################################# # Class method to load existing model instances # ################################################# # define method as a class method Method to load an existing GmshModel object Objects that have been saved to a binary file using the pickle module can be reloaded here. 
# load file # load saved file with pickle module ################################################################################ # VISUALIZATION METHODS # ################################################################################ ########################################################## # Method to visualize the model geometry using pythonocc # ########################################################## Method to visualize the Gmsh model geometry using pythonocc # optional pythonocc package is available # -> visualize the geometry # optional pythonocc package is unavailable # do nothing but printing a warning ############################################################ # Method to visualize the model mesh using pyvista and vtk # ############################################################ Method to visualize the generated mesh using pyvista and vtk # -> visualize the mesh #################################################################### # Method to show the whole model in Gmsh using the Gmsh-Python-API # #################################################################### Method to open the complete model in Gmsh ################################################################################ # PLACEHOLDER METHODS TO BE SPECIALIZED FOR THE INDIVIDUAL MODELS # ################################################################################ ################################################## # Method for the definition of geometric objects # ################################################## Placeholder method for the definition of geometric objects. Has to be specified in child classes ################################################### # Method for the definition of boolean operations # ################################################### Placeholder method for the definition of necessary boolean Operations. 
Has to be specified in child classes ################################################ # Method for the definition of physical groups # ################################################ Placeholder method to define required physical groups. Has to be specified in child classes ########################################### # Method to define refinement information # ########################################### Placeholder method to define/compute refinement fields for the mesh generation. Has to be specified in child classes ############################################################### # Method to set up periodicity constraints for the Gmsh model # ############################################################### Placeholder method to set up periodicity constraints for RVEs. Has to be specified in child classes, if required ################################################################################ # INTERFACING METHODS TO PASS INFORMATION TO THE GMSH MODEL # ################################################################################ ############################################### # Method to transform Gmsh entity Tags to IDs # ############################################### Interfacing method to get Gmsh entity IDs from given list of Gmsh entityTags Parameters: ----------- tags: (list of) tuples list of Gmsh entity tag tuples tags=(entityDimension, entityID) # get IDs of tags-array (tag: (dimension,ID)-tuple) # return IDs ############################################################ # method to get a single Gmsh option depending on its type # ############################################################ Method to get the value of a Gmsh configuratio option with known name Parameters: ----------- optionName: string name of the option # try to return option value assuming it is a string # -> use built-in gmsh.option.getString method # option value was no string, so it must be a number # -> use built-in gmsh.option.getNumber method 
############################################################ # method to set a single Gmsh option depending on its type # ############################################################ Method to set a Gmsh configuration option Parameters: ----------- optionName: string name of the option to set optionValue: int/float/string value of the option to set # option value is a string # -> use built-in gmsh.option.setString method # optionValue is a number # -> use built-in gmsh.option.setNumber method ############################################################ # Method to add the model boundary to the physical entites # ############################################################ Method to get the entities on the boundary of the Gmsh model # get information for a physical entity containing the model boundary ########################################### # Method to update the gmsh configuration # ########################################### Method to update the Gmsh configuration options with a dictionary of updated options Parameters: ----------- configurationUpdate: dict dictionary of configuration options to be updated # update stored Gmsh configuration # loop over all configuration settings # -> activate changed configuration ######################################################### # Method to add all geometric objects to the Gmsh model # ######################################################### Method to add Gmsh representations of the gmshModels geometric objects # loop over all geometric objects of the model # -> add a Gmsh representation of the object to the model and save the correspondig tag # -> add tag to the group of the geometric object ############################################################### # Method to perform all boolean operations for the Gmsh model # ############################################################### Method to perform defined boolean operations for the Gmsh model # loop over all boolean operations of the model # get details of the 
boolean operation to be performed # perform boolean operation # synchronize OCC-CAD representation with model # update groups ############################################### # Method to add physical groups to Gmsh model # ############################################### Method to add defined physical groups to the Gmsh model # loop over all physical entities of the model # get details of the physical entity to add # get dimension of the physical group # get the group of the physical entity (used as name) # get the number defined for the physical entity (used as material number) # find Gmsh representations of all group members and get IDs from their tags # set physical groups # define the entity group as physical and set correct physical number # set corresponding name of the physical group (equal to name of the group that is declared as physical for simplicity) ################################################# # Method to add refinement fields to Gmsh model # ################################################# Method to add defined refinement fields to the Gmsh model # loop over all refinement fields defined for the model # get details of the refinement field to add # get the type of refinement field # get information required for this type of refinement field # set refinement field # add new refinement field and save its number # provide all necessary information for this field from fieldInfo dictionary # -> current option value is a string # ->-> use built-in setString method of gmsh.model.mesh.field # -> current option value is a number # ->-> use built-in setNumber method of gmsh.model.mesh.field # -> current option value is a list or numpy array # ->-> use built-in setNumbers method of gmsh.model.mesh.field ################################################################################ # INTERFACING METHODS TO ADD GEOMETRIC OBJECTS TO THE MODEL # ################################################################################ 
############################################################## # Method to add a single geometric object to the Gmsh model # ############################################################## Method to add one of the objects that are defined within the class geometricObjects and its child classes to the Gmsh model. Parameters: ----------- objClass: class class the geometric object is defined in objData: keyworded object data enumeration of keyworded arguments needed for the creation of the new geometric object of class objectClass ################################################################################ # PRIVATE/HIDDEN METHODS FOR INTERNAL USE ONLY # ################################################################################ ################################################ # Method to check file string for saving files # ################################################ Internal method to get the file directory, name and extension from a fileString Parameters: ----------- fileString: string string to analyze whatToSave: string flag that indicates whether "geometry", "mesh" or "model" have to be saved # check if no fileString was passed # no file string passed # -> set empty file string # process passed file string # get file directory and name (with extension) # split file name into name and extension # check if file directory is empty # -> set default value (current directory) # check if file name is empty # use unique model name as file name # check if file extension is empty # -> set default value (.gmshModel) # return file parts ######################################## # Method to toggle Gmsh console output # ######################################## Method to enable/disable Gmsh console output ############################################################################ # Method to return the correct boolean operation from the operation string # ############################################################################ Internal method to return the 
correct boolean operation function from an operation string Parameters: ----------- operation: string boolean operation to be performed # operation to be performed is "cut" # -> use built-in gmsh.model.occ.cut method # operation to be performed is "fuse" # -> use built-in gmsh.model.occ.fuse method # operation to be performed is "fragment" # -> use built-in gmsh.model.occ.fragment method # operation to be performed is "intersect" # -> use built-in gmsh.model.occ.intersect method # operation to be performed is something different # -> raise error that type of boolean operation is not known ######################################################### # Method to get defined geometric objects from a string # ######################################################### Internal method to return the correct geometric object class from an object class string Parameters: ----------- objString: string required geometric object class (as a string) # get all classes from the geometricObjects file # check if class key matches the object string that was passed # return class ########################################################### # Method to calculate the overall Gmsh model bounding box # ########################################################### Internal method to get the overall Gmsh model bounding box ########################################################## # Method to convert meshes into meshio-supported formats # ########################################################## Internal method to convert meshes between different file formats using meshio Parameters: ----------- inFile: string file string (directory/name.extension) for the input mesh file outFile: string file string (directory/name.extension) for the output mesh file # read file of mesh to convert with meshio (get mesh format from file extension) # write file for converted mesh (get mesh format from file extension)
2.023766
2
app/server/list/views.py
tderleth/2-item-catalog
0
6629800
<reponame>tderleth/2-item-catalog #!/usr/bin/env python2.7 # -*- coding: utf-8 -*- """List views.""" from app.server.auth import login_required from app.server.database import db_session from app.server.database.list import List from flask import session as login_session from flask import Blueprint, render_template, redirect from flask import jsonify, request, flash, url_for list = Blueprint("list", __name__, template_folder="templates") @list.route('/json') @list.route('/') def index(): """Get all lists.""" data = List.query.all() if '/json' in request.path: lists = [] for list in data: lists.append(list.as_dict()) return jsonify(lists) else: return render_template('list/index.html', lists=data) @list.route('/<int:list_id>/json') @list.route('/<int:list_id>') def show(list_id): """Get single list via id.""" data = db_session.query(List).filter(List.id == list_id).first() if '/json' in request.path: return jsonify(data.as_dict()) else: return render_template('list/show.html', list=data) @list.route('/create', methods=['POST']) @login_required def create(): """Store new list.""" name = request.form.get("name") if not name: flash("Please provide a name") return redirect(url_for('list.index')) list = List(name=name, user_id=login_session['user_id']) db_session.add(list) db_session.commit() flash("New list %s created" % name) return redirect(url_for('list.index')) @list.route('/<int:list_id>/destroy', methods=['GET']) @login_required def destory(list_id): """Delete list.""" list = db_session.query(List).filter(List.id == list_id).first() if(list.user_id != login_session['user_id']): flash("This list does not belong to your account") return redirect(url_for('list.index')) db_session.delete(list) db_session.commit() flash("List %s destroyed" % list.name) return redirect(url_for('list.index')) @list.route('/<int:list_id>/update', methods=['POST']) @login_required def update(list_id): """Update list.""" list = db_session.query(List).filter(List.id == list_id).first() 
if(list.user_id != login_session['user_id']): flash("This list does not belong to your account") return redirect(url_for('list.show', list_id=list_id)) name = request.form.get("name") if not name: flash("Please provide a name") return redirect(url_for('list.show', list_id=list_id)) list.name = name db_session.add(list) db_session.commit() flash("List %s was updated" % list.name) return redirect(url_for('list.show', list_id=list_id))
#!/usr/bin/env python2.7 # -*- coding: utf-8 -*- """List views.""" from app.server.auth import login_required from app.server.database import db_session from app.server.database.list import List from flask import session as login_session from flask import Blueprint, render_template, redirect from flask import jsonify, request, flash, url_for list = Blueprint("list", __name__, template_folder="templates") @list.route('/json') @list.route('/') def index(): """Get all lists.""" data = List.query.all() if '/json' in request.path: lists = [] for list in data: lists.append(list.as_dict()) return jsonify(lists) else: return render_template('list/index.html', lists=data) @list.route('/<int:list_id>/json') @list.route('/<int:list_id>') def show(list_id): """Get single list via id.""" data = db_session.query(List).filter(List.id == list_id).first() if '/json' in request.path: return jsonify(data.as_dict()) else: return render_template('list/show.html', list=data) @list.route('/create', methods=['POST']) @login_required def create(): """Store new list.""" name = request.form.get("name") if not name: flash("Please provide a name") return redirect(url_for('list.index')) list = List(name=name, user_id=login_session['user_id']) db_session.add(list) db_session.commit() flash("New list %s created" % name) return redirect(url_for('list.index')) @list.route('/<int:list_id>/destroy', methods=['GET']) @login_required def destory(list_id): """Delete list.""" list = db_session.query(List).filter(List.id == list_id).first() if(list.user_id != login_session['user_id']): flash("This list does not belong to your account") return redirect(url_for('list.index')) db_session.delete(list) db_session.commit() flash("List %s destroyed" % list.name) return redirect(url_for('list.index')) @list.route('/<int:list_id>/update', methods=['POST']) @login_required def update(list_id): """Update list.""" list = db_session.query(List).filter(List.id == list_id).first() if(list.user_id != 
login_session['user_id']): flash("This list does not belong to your account") return redirect(url_for('list.show', list_id=list_id)) name = request.form.get("name") if not name: flash("Please provide a name") return redirect(url_for('list.show', list_id=list_id)) list.name = name db_session.add(list) db_session.commit() flash("List %s was updated" % list.name) return redirect(url_for('list.show', list_id=list_id))
en
0.747921
#!/usr/bin/env python2.7 # -*- coding: utf-8 -*- List views. Get all lists. Get single list via id. Store new list. Delete list. Update list.
2.597986
3
src/scripts/make_data_birdsongrec.py
JaerongA/tweetynet
1
6629801
<reponame>JaerongA/tweetynet #!/usr/bin/env python # coding: utf-8 """ (re)make data directory containing results obtained with BirdsongRecognition data repository uses config.ini files from src/config/ to find the results of creating a learning curve for song of each individual bird in directory, then copies sub-directory containing results of measuring accuracy on training and test sets to the ./data/BirdsongRecognition directory """ from configparser import ConfigParser from pathlib import Path import shutil REPO_ROOT = Path('~/Documents/repos/coding/birdsong/tweetynet/') REPO_ROOT = REPO_ROOT.expanduser() CONFIGS_DIR = REPO_ROOT.joinpath('src/configs/') BR_CONFIGS = sorted(list(CONFIGS_DIR.glob('*BirdsongRecognition*ini'))) BR_CONFIGS = [str(config) for config in BR_CONFIGS] if not all([f'bird0{i}' in br_config for i, br_config in enumerate(BR_CONFIGS)]): raise ValueError( "could not find all config.ini files for BirdsongRecognition " "in consecutive order (i.e., 10 files with names that end in " "bird00.ini, bird01.ini, ... 
bird09.ini)" ) BR_DATA_ROOT = REPO_ROOT.joinpath('data/BirdsongRecognition') # path to root of repository that contains results from running learncurve.train with each config.ini file NEW_PARENT = '/media/art/HD-LCU3/tweetynet_paper/BirdsongRecognition' # "roots" of paths in config.ini files that should be replaced with NEW_PARENT OLD_PARENTS = [ '/home/nickledave/Documents/data/BirdsongRecognition/vak', '~/Documents/data/birdsong/BirdsongRecognition/vak', '~/Documents/data/BirdsongRecognition/vak', '~/Documents/data/birdsong/vak', ] def change_parent(path, new_parent=NEW_PARENT, old_parents=OLD_PARENTS): """changes parent directory of a path, given a list of possible 'old' parents and the new parent that should replace any of those""" path = str(path) for old_parent in OLD_PARENTS: if old_parent in path: path = path.replace(old_parent, new_parent) assert new_parent in path, f'did not find parent to replace in {path}' path = Path(path) return path def remove_subdirs(root_dir=BR_DATA_ROOT): """removes all sub-directories from a directory""" subdirs = [subdir for subdir in root_dir.iterdir() if subdir.is_dir()] for subdir in subdirs: shutil.rmtree(subdir) def copy_test_dirs(br_configs=BR_CONFIGS, br_data_root=BR_DATA_ROOT, new_parent=NEW_PARENT, old_parents=OLD_PARENTS): """copy test dir to root, using path from .ini file Parameters ---------- old_parent : list of str new_parent : st br_configs : list of str, paths to config.ini files for BirdsongRecognition repository """ config_obj = ConfigParser() for birdnum, config_ini in enumerate(br_configs): config_obj.read(config_ini) results_dirname = config_obj['OUTPUT']['results_dir_made_by_main_script'] results_dirname = Path(results_dirname) src = results_dirname.joinpath('test') src = change_parent(src, new_parent, old_parents) dst = br_data_root.joinpath(f'Bird{birdnum}') if dst.exists(): raise ValueError(f"can't copy to directory, already exists: {dst}") shutil.copytree(src, dst) def main(): remove_subdirs() 
copy_test_dirs() if __name__ == '__main__': main()
#!/usr/bin/env python # coding: utf-8 """ (re)make data directory containing results obtained with BirdsongRecognition data repository uses config.ini files from src/config/ to find the results of creating a learning curve for song of each individual bird in directory, then copies sub-directory containing results of measuring accuracy on training and test sets to the ./data/BirdsongRecognition directory """ from configparser import ConfigParser from pathlib import Path import shutil REPO_ROOT = Path('~/Documents/repos/coding/birdsong/tweetynet/') REPO_ROOT = REPO_ROOT.expanduser() CONFIGS_DIR = REPO_ROOT.joinpath('src/configs/') BR_CONFIGS = sorted(list(CONFIGS_DIR.glob('*BirdsongRecognition*ini'))) BR_CONFIGS = [str(config) for config in BR_CONFIGS] if not all([f'bird0{i}' in br_config for i, br_config in enumerate(BR_CONFIGS)]): raise ValueError( "could not find all config.ini files for BirdsongRecognition " "in consecutive order (i.e., 10 files with names that end in " "bird00.ini, bird01.ini, ... 
bird09.ini)" ) BR_DATA_ROOT = REPO_ROOT.joinpath('data/BirdsongRecognition') # path to root of repository that contains results from running learncurve.train with each config.ini file NEW_PARENT = '/media/art/HD-LCU3/tweetynet_paper/BirdsongRecognition' # "roots" of paths in config.ini files that should be replaced with NEW_PARENT OLD_PARENTS = [ '/home/nickledave/Documents/data/BirdsongRecognition/vak', '~/Documents/data/birdsong/BirdsongRecognition/vak', '~/Documents/data/BirdsongRecognition/vak', '~/Documents/data/birdsong/vak', ] def change_parent(path, new_parent=NEW_PARENT, old_parents=OLD_PARENTS): """changes parent directory of a path, given a list of possible 'old' parents and the new parent that should replace any of those""" path = str(path) for old_parent in OLD_PARENTS: if old_parent in path: path = path.replace(old_parent, new_parent) assert new_parent in path, f'did not find parent to replace in {path}' path = Path(path) return path def remove_subdirs(root_dir=BR_DATA_ROOT): """removes all sub-directories from a directory""" subdirs = [subdir for subdir in root_dir.iterdir() if subdir.is_dir()] for subdir in subdirs: shutil.rmtree(subdir) def copy_test_dirs(br_configs=BR_CONFIGS, br_data_root=BR_DATA_ROOT, new_parent=NEW_PARENT, old_parents=OLD_PARENTS): """copy test dir to root, using path from .ini file Parameters ---------- old_parent : list of str new_parent : st br_configs : list of str, paths to config.ini files for BirdsongRecognition repository """ config_obj = ConfigParser() for birdnum, config_ini in enumerate(br_configs): config_obj.read(config_ini) results_dirname = config_obj['OUTPUT']['results_dir_made_by_main_script'] results_dirname = Path(results_dirname) src = results_dirname.joinpath('test') src = change_parent(src, new_parent, old_parents) dst = br_data_root.joinpath(f'Bird{birdnum}') if dst.exists(): raise ValueError(f"can't copy to directory, already exists: {dst}") shutil.copytree(src, dst) def main(): remove_subdirs() 
copy_test_dirs() if __name__ == '__main__': main()
en
0.806983
#!/usr/bin/env python # coding: utf-8 (re)make data directory containing results obtained with BirdsongRecognition data repository uses config.ini files from src/config/ to find the results of creating a learning curve for song of each individual bird in directory, then copies sub-directory containing results of measuring accuracy on training and test sets to the ./data/BirdsongRecognition directory # path to root of repository that contains results from running learncurve.train with each config.ini file # "roots" of paths in config.ini files that should be replaced with NEW_PARENT changes parent directory of a path, given a list of possible 'old' parents and the new parent that should replace any of those removes all sub-directories from a directory copy test dir to root, using path from .ini file Parameters ---------- old_parent : list of str new_parent : st br_configs : list of str, paths to config.ini files for BirdsongRecognition repository
2.433612
2
output/models/nist_data/list_pkg/non_negative_integer/schema_instance/nistschema_sv_iv_list_non_negative_integer_enumeration_3_xsd/nistschema_sv_iv_list_non_negative_integer_enumeration_3.py
tefra/xsdata-w3c-tests
1
6629802
<gh_stars>1-10 from dataclasses import dataclass, field from enum import Enum from typing import Optional __NAMESPACE__ = "NISTSchema-SV-IV-list-nonNegativeInteger-enumeration-3-NS" class NistschemaSvIvListNonNegativeIntegerEnumeration3Type(Enum): VALUE_2593582_35325331953622096_59928376274271011_369143139238641_4047522700962855_448_170037595650387 = ( 2593582, 35325331953622096, 59928376274271011, 369143139238641, 4047522700962855, 448, 170037595650387, ) VALUE_92674721098862222_56988794944_8995095162_445536483080688_90424057287271242_1743203984 = ( 92674721098862222, 56988794944, 8995095162, 445536483080688, 90424057287271242, 1743203984, ) VALUE_28073718705972_98_86037857651139_5404144927_61_15838367297_930080587702_657044161899199_42696566691343 = ( 28073718705972, 98, 86037857651139, 5404144927, 61, 15838367297, 930080587702, 657044161899199, 42696566691343, ) VALUE_485677282_87913_26790935902494_459164_4084749_88478687099512742 = ( 485677282, 87913, 26790935902494, 459164, 4084749, 88478687099512742, ) VALUE_5289_61_67984470324_848558598617_716144_7551087161_2188_74430_54147_33 = ( 5289, 61, 67984470324, 848558598617, 716144, 7551087161, 2188, 74430, 54147, 33, ) VALUE_693_7324_20_7475_4947489_80584759_9768357488_66469880_746558290 = ( 693, 7324, 20, 7475, 4947489, 80584759, 9768357488, 66469880, 746558290, ) VALUE_20_9857_82940265628_79_509550797499 = ( 20, 9857, 82940265628, 79, 509550797499, ) VALUE_1717402_88246753_32231773_93037482909724356_94687362_2385005448652326_14 = ( 1717402, 88246753, 32231773, 93037482909724356, 94687362, 2385005448652326, 14, ) @dataclass class NistschemaSvIvListNonNegativeIntegerEnumeration3: class Meta: name = "NISTSchema-SV-IV-list-nonNegativeInteger-enumeration-3" namespace = "NISTSchema-SV-IV-list-nonNegativeInteger-enumeration-3-NS" value: Optional[NistschemaSvIvListNonNegativeIntegerEnumeration3Type] = field( default=None, metadata={ "required": True, } )
from dataclasses import dataclass, field from enum import Enum from typing import Optional __NAMESPACE__ = "NISTSchema-SV-IV-list-nonNegativeInteger-enumeration-3-NS" class NistschemaSvIvListNonNegativeIntegerEnumeration3Type(Enum): VALUE_2593582_35325331953622096_59928376274271011_369143139238641_4047522700962855_448_170037595650387 = ( 2593582, 35325331953622096, 59928376274271011, 369143139238641, 4047522700962855, 448, 170037595650387, ) VALUE_92674721098862222_56988794944_8995095162_445536483080688_90424057287271242_1743203984 = ( 92674721098862222, 56988794944, 8995095162, 445536483080688, 90424057287271242, 1743203984, ) VALUE_28073718705972_98_86037857651139_5404144927_61_15838367297_930080587702_657044161899199_42696566691343 = ( 28073718705972, 98, 86037857651139, 5404144927, 61, 15838367297, 930080587702, 657044161899199, 42696566691343, ) VALUE_485677282_87913_26790935902494_459164_4084749_88478687099512742 = ( 485677282, 87913, 26790935902494, 459164, 4084749, 88478687099512742, ) VALUE_5289_61_67984470324_848558598617_716144_7551087161_2188_74430_54147_33 = ( 5289, 61, 67984470324, 848558598617, 716144, 7551087161, 2188, 74430, 54147, 33, ) VALUE_693_7324_20_7475_4947489_80584759_9768357488_66469880_746558290 = ( 693, 7324, 20, 7475, 4947489, 80584759, 9768357488, 66469880, 746558290, ) VALUE_20_9857_82940265628_79_509550797499 = ( 20, 9857, 82940265628, 79, 509550797499, ) VALUE_1717402_88246753_32231773_93037482909724356_94687362_2385005448652326_14 = ( 1717402, 88246753, 32231773, 93037482909724356, 94687362, 2385005448652326, 14, ) @dataclass class NistschemaSvIvListNonNegativeIntegerEnumeration3: class Meta: name = "NISTSchema-SV-IV-list-nonNegativeInteger-enumeration-3" namespace = "NISTSchema-SV-IV-list-nonNegativeInteger-enumeration-3-NS" value: Optional[NistschemaSvIvListNonNegativeIntegerEnumeration3Type] = field( default=None, metadata={ "required": True, } )
none
1
2.375718
2
masonite/info.py
Kush22/core
0
6629803
"""Module for specifying the Masonite version in a central location. """ VERSION = '2.0.25'
"""Module for specifying the Masonite version in a central location. """ VERSION = '2.0.25'
en
0.666915
Module for specifying the Masonite version in a central location.
1.345234
1
demo.py
liefswanson/455final
0
6629804
<filename>demo.py from tkinter import filedialog from math import ceil import math from matplotlib.image import imread, imsave from matplotlib.ticker import FuncFormatter import matplotlib.pyplot as plt from timeit import default_timer as timer from jinja2 import Template import pycuda.autoinit import pycuda.driver as drv import numpy from pycuda.compiler import SourceModule from tkinter import * from PIL import Image, ImageTk def test_chiaroscuro(img): return(cpu_chiaroscuro, gpu_chiaroscuro(img)) def cpu_chiaroscuro(img): result = numpy.zeros_like(img) height, width, depth = img.shape for row in range(height): for col in range(width): r = img[row,col, 0] / 255.0 g = img[row,col, 1] / 255.0 b = img[row,col, 2] / 255.0 intensity = 0.21*r + 0.72*g + 0.7*b if intensity == 0.0: intensity = 1.0 r = math.pow(r, 1.0/intensity) g = math.pow(g, 1.0/intensity) b = math.pow(b, 1.0/intensity) result[row, col, 0] = round(r * 255.0) result[row, col, 1] = round(g * 255.0) result[row, col, 2] = round(b * 255.0) return result def gpu_chiaroscuro(img): template = Template(""" __global__ void chiaroscuro(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int idx = col*{{depth}} + row*{{depth}}*{{height}}; if (idx+2 > {{width}}*{{height}}*{{depth}}) { return; } const float r = img[idx] / 255.0f; const float g = img[idx+1] / 255.0f; const float b = img[idx+2] / 255.0f; float intensity = 0.21f*r + 0.72*g + 0.07*b; if (intensity == 0.0f) { intensity = 1.0f; } const float out_r = __powf(r, 1.0f/intensity); const float out_g = __powf(g, 1.0f/intensity); const float out_b = __powf(b, 1.0f/intensity); dest[idx] = __float2int_rn(out_r*255.0f); dest[idx+1] = __float2int_rn(out_g*255.0f); dest[idx+2] = __float2int_rn(out_b*255.0f); } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) gpu_brighten = 
module.get_function("chiaroscuro") block = (8,8,1) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def gpu_run_effect(gpu_instructions, img): effect, block, grid = gpu_instructions dest = numpy.zeros_like(img) effect(drv.Out(dest), drv.In(img), block=block, grid=grid) return dest def test_gamma_half(img): return(cpu_gamma_half, gpu_gamma_half(img)) def gamma_half(px): px = px / 255; corrected = math.pow(px, 0.5) corrected *= 255.0 return round(corrected) def cpu_gamma_half(img): fn = numpy.vectorize(gamma_half) return fn(img) def gpu_gamma_half(img): template = Template(""" __global__ void gamma_half(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } const float px = img[idx] / 255.0f; float out = __powf(px, 0.5f); dest[idx] = __float2int_rn(out*255.0f); } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) dest = numpy.zeros_like(img) gpu_brighten = module.get_function("gamma_half") block = (8,8,3) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def test_gamma_two(img): return(cpu_gamma_two, gpu_gamma_two(img)) def gamma_two(px): px = px / 255; corrected = math.pow(px, 2.0) if px > 1.0: px = 1.0 if px < 0.0: px = 0.0 corrected *= 255.0 return round(corrected) def cpu_gamma_two(img): fn = numpy.vectorize(gamma_two) return fn(img) def gpu_gamma_two(img): template = Template(""" __global__ void gamma_two(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } const float 
px = img[idx] / 255.0f; float out = __powf(px, 2); dest[idx] = __float2int_rn(out*255.0f); } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) dest = numpy.zeros_like(img) gpu_brighten = module.get_function("gamma_two") block = (8,8,3) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def test_brighten(img): return (cpu_brighten, gpu_brighten(img)) def cpu_brighten(img): fn = numpy.vectorize(brighten_vectorized) return fn(img) def gpu_brighten(img): template = Template(""" __global__ void brighten(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } const int px = img[idx]; const int temp = px*2; if (0 > temp){ dest[idx] = 0; return; } if (255 < temp){ dest[idx] = 255; return; } dest[idx] = temp; } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) dest = numpy.zeros_like(img) gpu_brighten = module.get_function("brighten") block = (8,8,3) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def brighten_vectorized(px): temp = px * 2 return clamp(0, 255, temp) def test_edge_detection(img): return (edge_detect, gpu_edge_detect(img)) def edge_detect(img): result = numpy.zeros_like(img) width, height, depth = img.shape for row in range(1, width-1): for col in range(1, height-1): total = 0 for chan in range(depth): total += 8*img[row,col,chan] total -= img[row-1,col-1,chan] total -= img[row-1,col,chan] total -= img[row-1,col+1,chan] total -= img[row,col-1,chan] total -= img[row,col+1,chan] total -= img[row+1,col-1,chan] total -= img[row+1,col,chan] total -= img[row+1,col+1,chan] result[row,col,0] = clamp(0,255,total) result[row,col,1] = 
clamp(0,255,total) result[row,col,2] = clamp(0,255,total) return result def gpu_edge_detect(img): template = Template(""" __global__ void edge(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; if (row < 1 || row >= {{width}} - 1 || col < 1 || col >= {{height}} - 1) { return; } int total = 0; for (int chan = 0; chan < {{depth}}; chan++) { const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; const int down = {{depth}}*{{height}}; const int right = {{depth}}; total += 8 * img[idx]; total -= img[idx - down - right]; total -= img[idx - down]; total -= img[idx - down + right]; total -= img[idx - down]; total -= img[idx + down]; total -= img[idx + down - right]; total -= img[idx + down]; total -= img[idx + down + right]; } const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (0 > total){ dest[idx] = 0; dest[idx+1] = 0; dest[idx+2] = 0; return; } if (255 < total){ dest[idx] = 255; dest[idx+1] = 255; dest[idx+2] = 255; return; } dest[idx] = total; dest[idx+1] = total; dest[idx+2] = total; } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) dest = numpy.zeros_like(img) gpu_brighten = module.get_function("edge") block = (8,8,1) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def clamp(low, high, val): if low > val: return low if high < val: return high return val chosen_file = "" chosen_effect = "" switch = { 'Edge Detection': test_edge_detection, 'Brighten x2': test_brighten, 'Gamma 2.0': test_gamma_two, 'Gamma 0.5': test_gamma_half, 'Chiaroscuro': test_chiaroscuro, } ############################################################################################### ## UI ############################################################################################### root = Tk() root.filename = filedialog.askopenfilename(initialdir = 
"/",title = "Select file",filetypes = (("jpeg files","*.jpg"),("png files","*.png"),("all files","*.*"))) root.title("Select an effect to apply") # Add a grid mainframe = Frame(root) mainframe.grid(column=0,row=0, sticky=(N,W,E,S) ) mainframe.columnconfigure(0, weight = 1) mainframe.rowconfigure(0, weight = 1) mainframe.pack(pady = 100, padx = 100) # Create a Tkinter variable tkvar = StringVar(root) # Dictionary with options choices = list(switch.keys()) tkvar.set('Select an effect...') # set the default option popupMenu = OptionMenu(mainframe, tkvar, *choices) Label(mainframe, text="Choose a dish").grid(row = 1, column = 1) popupMenu.grid(row = 2, column =1) # on change dropdown value def change_dropdown(*args): root.chosen_effect = tkvar.get() root.destroy() # link function to change dropdown tkvar.trace('w', change_dropdown) root.mainloop() chosen_file = root.filename chosen_effect = root.chosen_effect img = imread(chosen_file) cpu_renderer, gpu_renderer = switch[chosen_effect](img) imsave('/tmp/cpu_render.png', cpu_renderer(img)) imsave('/tmp/gpu_render.png', gpu_run_effect(gpu_renderer, img)) class ImgFrame(Frame): def __init__(self, master, path): Frame.__init__(self, master) self.columnconfigure(0,weight=1) self.rowconfigure(0,weight=1) self.original = Image.open(path) self.image = ImageTk.PhotoImage(self.original) self.display = Canvas(self, bd=0, highlightthickness=0) self.display.create_image(0, 0, image=self.image, anchor=NW, tags="IMG") self.display.grid(row=0, sticky=W+E+N+S) self.pack(fill=BOTH, expand=1) self.bind("<Configure>", self.resize) def resize(self, event): w, h = self.original.size ew, eh = event.width, event.height ratio = w/h if ew < eh * ratio: size = (round(eh*ratio), eh) else: size = (ew, round(ew/ratio)) resized = self.original.resize(size,Image.ANTIALIAS) self.image = ImageTk.PhotoImage(resized) self.display.delete("IMG") self.display.create_image(0, 0, image=self.image, anchor=NW, tags="IMG") width, height, _ = img.shape ratio = 
round(width/height) root = Tk() root.title(chosen_file + ' ' + chosen_effect) left = Label(root) left.pack(side=LEFT) mid = Label(root) mid.pack(side=LEFT) right = Label(root) right.pack(side=LEFT) left = ImgFrame(left, '/tmp/cpu_render.png') mid = ImgFrame(mid, chosen_file) right = ImgFrame(right, '/tmp/gpu_render.png') def perf_test(frames, name, cpu_renderer, gpu_renderer, img, root, output): cpu_control, gpu_control = build_control(img) cpu_total, cpu_control_total = test(frames, name, cpu_renderer, cpu_control, img) gpu_total, gpu_control_total = gpu_test(frames, name, gpu_renderer, gpu_control, img) f = open('/tmp/gpu_perf_results', 'w') f.write(str(cpu_total) + '\n') f.write(str(cpu_control_total) + '\n') f.write(str(gpu_total) + '\n') f.write(str(gpu_control_total) + '\n') output.append(cpu_total) output.append(cpu_control_total) output.append(gpu_total) output.append(gpu_control_total) root.destroy() def test(frames, name, fn, ctrl, img): fn(img) start = timer() for _ in range(frames): result = fn(img) total = timer() - start ctrl(img) start = timer() for _ in range(frames): result = ctrl(img) control_total = timer() - start print(name + ' cpu-test') print('%f fps' % (frames / total)) # actual time print('%f fps' % (frames / control_total)) # travel time print('%f fps' % (frames / (total-control_total))) # theoretical best case (actual - travel) return (total, control_total) def gpu_test(frames, name, fn, ctrl, img): gpu_run_effect(fn, img) start = timer() for _ in range(frames): result = gpu_run_effect(fn, img) total = timer() - start gpu_run_effect(ctrl, img) start = timer() for _ in range(frames): result = gpu_run_effect(ctrl, img) control_total = timer() - start print(name + ' gpu-test') print('%f fps' % (frames / total)) # actual time print('%f fps' % (frames / control_total)) # travel time print('%f fps' % (frames / (total-control_total))) # theoretical best case (actual - travel) return (total, control_total) def build_control(img): control_template 
= Template(""" __global__ void control(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } dest[idx] = img[idx]; } """) width, height, depth = img.shape module = SourceModule(control_template.render(width=width, height=height, depth=depth)) gpu_brighten = module.get_function("control") block = (8,8,3) grid = (ceil(width/8), ceil(height/8)) gpu_control = (gpu_brighten, block, grid) def vectorized_control(px): return px cpu_control = numpy.vectorize(vectorized_control) return (cpu_control, gpu_control) frames = 100 results = list() name = chosen_effect + ' ' + chosen_file button = Button(root, text='run perf test', command=lambda: perf_test(frames, name,cpu_renderer, gpu_renderer, img, root, results)) button.pack(side=BOTTOM) root.mainloop() if len(results) == 0: exit() def fps_calc(total, control_total): fps = frames/total control_fps = frames/control_total best_fps = frames/(total-control_total) return (fps, control_fps, best_fps) cpu_fps, cpu_control_fps, cpu_best_fps = fps_calc(results[0], results[1]) gpu_fps, gpu_control_fps, gpu_best_fps = fps_calc(results[2], results[3]) fps_stats = [cpu_fps, cpu_control_fps, gpu_fps, gpu_control_fps] def fps_formatter(x, pos): return '%1.0f FPS' % x formatter = FuncFormatter(fps_formatter) x = numpy.arange(4) _, axs = plt.subplots(nrows=1, ncols=2, sharex=True) ax = axs[0] ax.yaxis.set_major_formatter(formatter) ax.set_ylabel('FPS linear scale') ax.bar(x, fps_stats) plt.xticks(x, ('CPU', 'CPU CTRL', 'GPU', 'GPU CTRL')) ax = axs[1] ax.set_yscale("log", nonposy='clip') ax.set_ylabel('FPS logarithmic scale') ax.bar(x, fps_stats) fig = plt.gcf() fig.canvas.set_window_title(name) plt.tight_layout() plt.show()
<filename>demo.py from tkinter import filedialog from math import ceil import math from matplotlib.image import imread, imsave from matplotlib.ticker import FuncFormatter import matplotlib.pyplot as plt from timeit import default_timer as timer from jinja2 import Template import pycuda.autoinit import pycuda.driver as drv import numpy from pycuda.compiler import SourceModule from tkinter import * from PIL import Image, ImageTk def test_chiaroscuro(img): return(cpu_chiaroscuro, gpu_chiaroscuro(img)) def cpu_chiaroscuro(img): result = numpy.zeros_like(img) height, width, depth = img.shape for row in range(height): for col in range(width): r = img[row,col, 0] / 255.0 g = img[row,col, 1] / 255.0 b = img[row,col, 2] / 255.0 intensity = 0.21*r + 0.72*g + 0.7*b if intensity == 0.0: intensity = 1.0 r = math.pow(r, 1.0/intensity) g = math.pow(g, 1.0/intensity) b = math.pow(b, 1.0/intensity) result[row, col, 0] = round(r * 255.0) result[row, col, 1] = round(g * 255.0) result[row, col, 2] = round(b * 255.0) return result def gpu_chiaroscuro(img): template = Template(""" __global__ void chiaroscuro(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int idx = col*{{depth}} + row*{{depth}}*{{height}}; if (idx+2 > {{width}}*{{height}}*{{depth}}) { return; } const float r = img[idx] / 255.0f; const float g = img[idx+1] / 255.0f; const float b = img[idx+2] / 255.0f; float intensity = 0.21f*r + 0.72*g + 0.07*b; if (intensity == 0.0f) { intensity = 1.0f; } const float out_r = __powf(r, 1.0f/intensity); const float out_g = __powf(g, 1.0f/intensity); const float out_b = __powf(b, 1.0f/intensity); dest[idx] = __float2int_rn(out_r*255.0f); dest[idx+1] = __float2int_rn(out_g*255.0f); dest[idx+2] = __float2int_rn(out_b*255.0f); } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) gpu_brighten = 
module.get_function("chiaroscuro") block = (8,8,1) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def gpu_run_effect(gpu_instructions, img): effect, block, grid = gpu_instructions dest = numpy.zeros_like(img) effect(drv.Out(dest), drv.In(img), block=block, grid=grid) return dest def test_gamma_half(img): return(cpu_gamma_half, gpu_gamma_half(img)) def gamma_half(px): px = px / 255; corrected = math.pow(px, 0.5) corrected *= 255.0 return round(corrected) def cpu_gamma_half(img): fn = numpy.vectorize(gamma_half) return fn(img) def gpu_gamma_half(img): template = Template(""" __global__ void gamma_half(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } const float px = img[idx] / 255.0f; float out = __powf(px, 0.5f); dest[idx] = __float2int_rn(out*255.0f); } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) dest = numpy.zeros_like(img) gpu_brighten = module.get_function("gamma_half") block = (8,8,3) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def test_gamma_two(img): return(cpu_gamma_two, gpu_gamma_two(img)) def gamma_two(px): px = px / 255; corrected = math.pow(px, 2.0) if px > 1.0: px = 1.0 if px < 0.0: px = 0.0 corrected *= 255.0 return round(corrected) def cpu_gamma_two(img): fn = numpy.vectorize(gamma_two) return fn(img) def gpu_gamma_two(img): template = Template(""" __global__ void gamma_two(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } const float 
px = img[idx] / 255.0f; float out = __powf(px, 2); dest[idx] = __float2int_rn(out*255.0f); } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) dest = numpy.zeros_like(img) gpu_brighten = module.get_function("gamma_two") block = (8,8,3) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def test_brighten(img): return (cpu_brighten, gpu_brighten(img)) def cpu_brighten(img): fn = numpy.vectorize(brighten_vectorized) return fn(img) def gpu_brighten(img): template = Template(""" __global__ void brighten(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } const int px = img[idx]; const int temp = px*2; if (0 > temp){ dest[idx] = 0; return; } if (255 < temp){ dest[idx] = 255; return; } dest[idx] = temp; } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) dest = numpy.zeros_like(img) gpu_brighten = module.get_function("brighten") block = (8,8,3) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def brighten_vectorized(px): temp = px * 2 return clamp(0, 255, temp) def test_edge_detection(img): return (edge_detect, gpu_edge_detect(img)) def edge_detect(img): result = numpy.zeros_like(img) width, height, depth = img.shape for row in range(1, width-1): for col in range(1, height-1): total = 0 for chan in range(depth): total += 8*img[row,col,chan] total -= img[row-1,col-1,chan] total -= img[row-1,col,chan] total -= img[row-1,col+1,chan] total -= img[row,col-1,chan] total -= img[row,col+1,chan] total -= img[row+1,col-1,chan] total -= img[row+1,col,chan] total -= img[row+1,col+1,chan] result[row,col,0] = clamp(0,255,total) result[row,col,1] = 
clamp(0,255,total) result[row,col,2] = clamp(0,255,total) return result def gpu_edge_detect(img): template = Template(""" __global__ void edge(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; if (row < 1 || row >= {{width}} - 1 || col < 1 || col >= {{height}} - 1) { return; } int total = 0; for (int chan = 0; chan < {{depth}}; chan++) { const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; const int down = {{depth}}*{{height}}; const int right = {{depth}}; total += 8 * img[idx]; total -= img[idx - down - right]; total -= img[idx - down]; total -= img[idx - down + right]; total -= img[idx - down]; total -= img[idx + down]; total -= img[idx + down - right]; total -= img[idx + down]; total -= img[idx + down + right]; } const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (0 > total){ dest[idx] = 0; dest[idx+1] = 0; dest[idx+2] = 0; return; } if (255 < total){ dest[idx] = 255; dest[idx+1] = 255; dest[idx+2] = 255; return; } dest[idx] = total; dest[idx+1] = total; dest[idx+2] = total; } """) width, height, depth = img.shape module = SourceModule(template.render(width=width, height=height, depth=depth)) dest = numpy.zeros_like(img) gpu_brighten = module.get_function("edge") block = (8,8,1) grid = (ceil(width/8), ceil(height/8)) return (gpu_brighten, block, grid) def clamp(low, high, val): if low > val: return low if high < val: return high return val chosen_file = "" chosen_effect = "" switch = { 'Edge Detection': test_edge_detection, 'Brighten x2': test_brighten, 'Gamma 2.0': test_gamma_two, 'Gamma 0.5': test_gamma_half, 'Chiaroscuro': test_chiaroscuro, } ############################################################################################### ## UI ############################################################################################### root = Tk() root.filename = filedialog.askopenfilename(initialdir = 
"/",title = "Select file",filetypes = (("jpeg files","*.jpg"),("png files","*.png"),("all files","*.*"))) root.title("Select an effect to apply") # Add a grid mainframe = Frame(root) mainframe.grid(column=0,row=0, sticky=(N,W,E,S) ) mainframe.columnconfigure(0, weight = 1) mainframe.rowconfigure(0, weight = 1) mainframe.pack(pady = 100, padx = 100) # Create a Tkinter variable tkvar = StringVar(root) # Dictionary with options choices = list(switch.keys()) tkvar.set('Select an effect...') # set the default option popupMenu = OptionMenu(mainframe, tkvar, *choices) Label(mainframe, text="Choose a dish").grid(row = 1, column = 1) popupMenu.grid(row = 2, column =1) # on change dropdown value def change_dropdown(*args): root.chosen_effect = tkvar.get() root.destroy() # link function to change dropdown tkvar.trace('w', change_dropdown) root.mainloop() chosen_file = root.filename chosen_effect = root.chosen_effect img = imread(chosen_file) cpu_renderer, gpu_renderer = switch[chosen_effect](img) imsave('/tmp/cpu_render.png', cpu_renderer(img)) imsave('/tmp/gpu_render.png', gpu_run_effect(gpu_renderer, img)) class ImgFrame(Frame): def __init__(self, master, path): Frame.__init__(self, master) self.columnconfigure(0,weight=1) self.rowconfigure(0,weight=1) self.original = Image.open(path) self.image = ImageTk.PhotoImage(self.original) self.display = Canvas(self, bd=0, highlightthickness=0) self.display.create_image(0, 0, image=self.image, anchor=NW, tags="IMG") self.display.grid(row=0, sticky=W+E+N+S) self.pack(fill=BOTH, expand=1) self.bind("<Configure>", self.resize) def resize(self, event): w, h = self.original.size ew, eh = event.width, event.height ratio = w/h if ew < eh * ratio: size = (round(eh*ratio), eh) else: size = (ew, round(ew/ratio)) resized = self.original.resize(size,Image.ANTIALIAS) self.image = ImageTk.PhotoImage(resized) self.display.delete("IMG") self.display.create_image(0, 0, image=self.image, anchor=NW, tags="IMG") width, height, _ = img.shape ratio = 
round(width/height) root = Tk() root.title(chosen_file + ' ' + chosen_effect) left = Label(root) left.pack(side=LEFT) mid = Label(root) mid.pack(side=LEFT) right = Label(root) right.pack(side=LEFT) left = ImgFrame(left, '/tmp/cpu_render.png') mid = ImgFrame(mid, chosen_file) right = ImgFrame(right, '/tmp/gpu_render.png') def perf_test(frames, name, cpu_renderer, gpu_renderer, img, root, output): cpu_control, gpu_control = build_control(img) cpu_total, cpu_control_total = test(frames, name, cpu_renderer, cpu_control, img) gpu_total, gpu_control_total = gpu_test(frames, name, gpu_renderer, gpu_control, img) f = open('/tmp/gpu_perf_results', 'w') f.write(str(cpu_total) + '\n') f.write(str(cpu_control_total) + '\n') f.write(str(gpu_total) + '\n') f.write(str(gpu_control_total) + '\n') output.append(cpu_total) output.append(cpu_control_total) output.append(gpu_total) output.append(gpu_control_total) root.destroy() def test(frames, name, fn, ctrl, img): fn(img) start = timer() for _ in range(frames): result = fn(img) total = timer() - start ctrl(img) start = timer() for _ in range(frames): result = ctrl(img) control_total = timer() - start print(name + ' cpu-test') print('%f fps' % (frames / total)) # actual time print('%f fps' % (frames / control_total)) # travel time print('%f fps' % (frames / (total-control_total))) # theoretical best case (actual - travel) return (total, control_total) def gpu_test(frames, name, fn, ctrl, img): gpu_run_effect(fn, img) start = timer() for _ in range(frames): result = gpu_run_effect(fn, img) total = timer() - start gpu_run_effect(ctrl, img) start = timer() for _ in range(frames): result = gpu_run_effect(ctrl, img) control_total = timer() - start print(name + ' gpu-test') print('%f fps' % (frames / total)) # actual time print('%f fps' % (frames / control_total)) # travel time print('%f fps' % (frames / (total-control_total))) # theoretical best case (actual - travel) return (total, control_total) def build_control(img): control_template 
= Template(""" __global__ void control(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } dest[idx] = img[idx]; } """) width, height, depth = img.shape module = SourceModule(control_template.render(width=width, height=height, depth=depth)) gpu_brighten = module.get_function("control") block = (8,8,3) grid = (ceil(width/8), ceil(height/8)) gpu_control = (gpu_brighten, block, grid) def vectorized_control(px): return px cpu_control = numpy.vectorize(vectorized_control) return (cpu_control, gpu_control) frames = 100 results = list() name = chosen_effect + ' ' + chosen_file button = Button(root, text='run perf test', command=lambda: perf_test(frames, name,cpu_renderer, gpu_renderer, img, root, results)) button.pack(side=BOTTOM) root.mainloop() if len(results) == 0: exit() def fps_calc(total, control_total): fps = frames/total control_fps = frames/control_total best_fps = frames/(total-control_total) return (fps, control_fps, best_fps) cpu_fps, cpu_control_fps, cpu_best_fps = fps_calc(results[0], results[1]) gpu_fps, gpu_control_fps, gpu_best_fps = fps_calc(results[2], results[3]) fps_stats = [cpu_fps, cpu_control_fps, gpu_fps, gpu_control_fps] def fps_formatter(x, pos): return '%1.0f FPS' % x formatter = FuncFormatter(fps_formatter) x = numpy.arange(4) _, axs = plt.subplots(nrows=1, ncols=2, sharex=True) ax = axs[0] ax.yaxis.set_major_formatter(formatter) ax.set_ylabel('FPS linear scale') ax.bar(x, fps_stats) plt.xticks(x, ('CPU', 'CPU CTRL', 'GPU', 'GPU CTRL')) ax = axs[1] ax.set_yscale("log", nonposy='clip') ax.set_ylabel('FPS logarithmic scale') ax.bar(x, fps_stats) fig = plt.gcf() fig.canvas.set_window_title(name) plt.tight_layout() plt.show()
en
0.303272
__global__ void chiaroscuro(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int idx = col*{{depth}} + row*{{depth}}*{{height}}; if (idx+2 > {{width}}*{{height}}*{{depth}}) { return; } const float r = img[idx] / 255.0f; const float g = img[idx+1] / 255.0f; const float b = img[idx+2] / 255.0f; float intensity = 0.21f*r + 0.72*g + 0.07*b; if (intensity == 0.0f) { intensity = 1.0f; } const float out_r = __powf(r, 1.0f/intensity); const float out_g = __powf(g, 1.0f/intensity); const float out_b = __powf(b, 1.0f/intensity); dest[idx] = __float2int_rn(out_r*255.0f); dest[idx+1] = __float2int_rn(out_g*255.0f); dest[idx+2] = __float2int_rn(out_b*255.0f); } __global__ void gamma_half(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } const float px = img[idx] / 255.0f; float out = __powf(px, 0.5f); dest[idx] = __float2int_rn(out*255.0f); } __global__ void gamma_two(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } const float px = img[idx] / 255.0f; float out = __powf(px, 2); dest[idx] = __float2int_rn(out*255.0f); } __global__ void brighten(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } const int px = img[idx]; const int temp = px*2; if 
(0 > temp){ dest[idx] = 0; return; } if (255 < temp){ dest[idx] = 255; return; } dest[idx] = temp; } __global__ void edge(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; if (row < 1 || row >= {{width}} - 1 || col < 1 || col >= {{height}} - 1) { return; } int total = 0; for (int chan = 0; chan < {{depth}}; chan++) { const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; const int down = {{depth}}*{{height}}; const int right = {{depth}}; total += 8 * img[idx]; total -= img[idx - down - right]; total -= img[idx - down]; total -= img[idx - down + right]; total -= img[idx - down]; total -= img[idx + down]; total -= img[idx + down - right]; total -= img[idx + down]; total -= img[idx + down + right]; } const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (0 > total){ dest[idx] = 0; dest[idx+1] = 0; dest[idx+2] = 0; return; } if (255 < total){ dest[idx] = 255; dest[idx+1] = 255; dest[idx+2] = 255; return; } dest[idx] = total; dest[idx+1] = total; dest[idx+2] = total; } ############################################################################################### ## UI ############################################################################################### # Add a grid # Create a Tkinter variable # Dictionary with options # set the default option # on change dropdown value # link function to change dropdown # actual time # travel time # theoretical best case (actual - travel) # actual time # travel time # theoretical best case (actual - travel) __global__ void control(unsigned char *dest, unsigned char *img) { const int row = threadIdx.x + blockDim.x*blockIdx.x; const int col = threadIdx.y + blockDim.y*blockIdx.y; const int chan = threadIdx.z; const int idx = chan + col*{{depth}} + row*{{depth}}*{{height}}; if (idx > {{width}}*{{height}}*{{depth}}) { return; } dest[idx] = img[idx]; }
2.339118
2
terrascript/provider/datadog.py
mjuenema/python-terrascript
507
6629805
<filename>terrascript/provider/datadog.py # terrascript/provider/datadog.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:15:03 UTC) # # For imports without namespace, e.g. # # >>> import terrascript.provider.datadog # # instead of # # >>> import terrascript.provider.DataDog.datadog # # This is only available for 'official' and 'partner' providers. from terrascript.provider.DataDog.datadog import *
<filename>terrascript/provider/datadog.py # terrascript/provider/datadog.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:15:03 UTC) # # For imports without namespace, e.g. # # >>> import terrascript.provider.datadog # # instead of # # >>> import terrascript.provider.DataDog.datadog # # This is only available for 'official' and 'partner' providers. from terrascript.provider.DataDog.datadog import *
en
0.479998
# terrascript/provider/datadog.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:15:03 UTC) # # For imports without namespace, e.g. # # >>> import terrascript.provider.datadog # # instead of # # >>> import terrascript.provider.DataDog.datadog # # This is only available for 'official' and 'partner' providers.
1.275481
1
sympy/core/basic_methods.py
certik/sympy-oldcore
1
6629806
""" Implementation of Basic low-level methods. """ import decimal from assumptions import AssumeMeths # used for canonical ordering of symbolic sequences # via __cmp__ method: ordering_of_classes = [ # singleton numbers 'Zero', 'One','Half','Infinity','NaN','NegativeOne','NegativeInfinity', # numbers 'Integer','Rational','Real', # singleton symbols 'Exp1','Pi','ImaginaryUnit', # symbols 'Symbol','Wild','Temporary', # Functions that should come before Pow/Add/Mul 'ApplyConjugate', 'ApplyAbs', # arithmetic operations 'Pow', 'Mul', 'Add', # function values 'Apply', 'ApplyExp','ApplyLog', 'ApplySin','ApplyCos','ApplyTan','ApplyCot', 'ApplyASin','ApplyACos','ApplyATan','ApplyACot', 'ApplySinh','ApplyCosh','ApplyTanh','ApplyCoth', 'ApplyASinh','ApplyACosh','ApplyATanh','ApplyACoth', 'ApplyRisingFactorial','ApplyFallingFactorial', 'ApplyFactorial','ApplyBinomial', 'ApplyFloor', 'ApplyCeiling', 'ApplyRe','ApplyIm', 'ApplyArg', 'ApplySqrt','ApplySign', 'ApplyMrvLog', 'ApplyGamma','ApplyLowerGamma','ApplyUpperGamma','ApplyPolyGamma', 'ApplyErf', 'ApplyChebyshev','ApplyChebyshev2', 'Derivative','Integral', # defined singleton functions 'Abs','Sign','Sqrt', 'Floor', 'Ceiling', 'Re', 'Im', 'Arg', 'Conjugate', 'Exp','Log','MrvLog', 'Sin','Cos','Tan','Cot','ASin','ACos','ATan','ACot', 'Sinh','Cosh','Tanh','Coth','ASinh','ACosh','ATanh','ACoth', 'RisingFactorial','FallingFactorial', 'Factorial','Binomial', 'Gamma','LowerGamma','UpperGamma','PolyGamma', 'Erf', # special polynomials 'Chebyshev','Chebyshev2', # undefined functions 'Function','WildFunction', # anonymous functions 'Lambda', # operators 'FDerivative','FApply', # composition of functions 'FPow', 'Composition', # Landau O symbol 'Order', # relational operations 'Equality', 'Unequality', 'StrictInequality', 'Inequality', ] # def repr_level(flag=None, _cache=[1]): if flag is None: return _cache[0] old_flag = _cache[0] _cache[0] = max(0, min(2, int(flag))) # restrict to 0,1,2 return old_flag def mycopy(obj, level=0): if 
isinstance(obj, (list, tuple)): return obj.__class__(map(mycopy, obj)) elif isinstance(obj, dict): d = obj.__class__() for k,v in obj.items(): d[mycopy(k)] = mycopy(v) return d return obj def cache_it_fast(func): func._cache_it_cache = func_cache_it_cache = {} def wrapper(*args, **kw_args): if kw_args: keys = kw_args.keys() keys.sort() items = [(k+'=',kw_args[k]) for k in keys] k = args + tuple(items) else: k = args cache_flag = False try: r = func_cache_it_cache[k] except KeyError: r = func(*args, **kw_args) cache_flag = True if cache_flag: func_cache_it_cache[k] = r return mycopy(r) return wrapper def cache_it_immutable(func): func._cache_it_cache = func_cache_it_cache = {} def wrapper(*args, **kw_args): if kw_args: keys = kw_args.keys() keys.sort() items = [(k+'=',kw_args[k]) for k in keys] k = args + tuple(items) else: k = args try: return func_cache_it_cache[k] except KeyError: pass func_cache_it_cache[k] = r = func(*args, **kw_args) return r return wrapper def cache_it_debug(func): func._cache_it_cache = func_cache_it_cache = {} func._cache_it_cache_repr = func_cache_it_cache_repr = {} def wrapper(*args, **kw_args): if kw_args: keys = kw_args.keys() keys.sort() items = [(k+'=',kw_args[k]) for k in keys] k = args + tuple(items) else: k = args cache_flag = False try: r = func_cache_it_cache[k] except KeyError: r = func(*args, **kw_args) cache_flag = True if cache_flag: func_cache_it_cache[k] = r f = repr_level(0) func_cache_it_cache_repr[k] = repr(r) repr_level(f) else: s = func_cache_it_cache_repr[k] f = repr_level(0) new_s = repr(r) repr_level(f) # check that cache values have not changed assert new_s==s,`func,s,r, args[0].__class__` return mycopy(r) return wrapper cache_it = cache_it_fast #cache_it = cache_it_debug # twice slower def cache_it_nondummy(func): func._cache_it_cache = func_cache_it_cache = {} def wrapper(*args, **kw_args): if kw_args: try: dummy = kw_args['dummy'] except KeyError: dummy = None if dummy: return func(*args, **kw_args) keys = 
kw_args.keys() keys.sort() items = [(k+'=',kw_args[k]) for k in keys] k = args + tuple(items) else: k = args try: return func_cache_it_cache[k] except KeyError: pass func_cache_it_cache[k] = r = func(*args, **kw_args) return r return wrapper class BasicType(type): pass class MetaBasicMeths(BasicType): classnamespace = {} repr_level = 0 # defines the output of repr() singleton = {} def __init__(cls,*args,**kws): n = cls.__name__ c = MetaBasicMeths.classnamespace.get(n) if c is None: MetaBasicMeths.classnamespace[n] = cls else: print 'Ignoring redefinition of %s: %s defined earlier than %s' % (n, c, cls) type.__init__(cls, *args, **kws) # initialize default_assumptions dictionary default_assumptions = {} for k in dir(cls): if not k.startswith('is_'): continue v = getattr(cls, k) k = k[3:] if isinstance(v,(bool,int,long)): default_assumptions[k] = bool(v) cls.default_assumptions = default_assumptions def __getattr__(cls, name): try: return MetaBasicMeths.classnamespace[name] except KeyError: pass raise AttributeError("'%s' object has no attribute '%s'"% (cls.__name__, name)) def __cmp__(cls, other): n1 = cls.__name__ n2 = other.__name__ c = cmp(n1,n2) if not c: return 0 UNKNOWN = len(ordering_of_classes)+1 try: i1 = ordering_of_classes.index(n1) except ValueError: #print 'Add',n1,'to basic_methods.ordering_of_classes list' #return c i1 = UNKNOWN try: i2 = ordering_of_classes.index(n2) except ValueError: #print 'Add',n2,'to basic_methods.ordering_of_classes list' #return c i2 = UNKNOWN if i1 == UNKNOWN and i2 == UNKNOWN: return c return cmp(i1,i2) class BasicMeths(AssumeMeths): __metaclass__ = MetaBasicMeths Lambda_precedence = 1 Add_precedence = 40 Mul_precedence = 50 Pow_precedence = 60 Apply_precedence = 70 Item_precedence = 75 Atom_precedence = 1000 def __getattr__(self, name): try: return self._get_assumption(name) except AttributeError: pass if BasicMeths.classnamespace.has_key(name): return BasicMeths.classnamespace[name] else: raise AttributeError("'%s' object 
has no attribute '%s'"% (self.__class__.__name__, name)) def __setattr__(self, name, val): if name.startswith('is_'): raise AttributeError("Modification of assumptions is not allowed") else: AssumeMeths.__setattr__(self, name, val) def __hash__(self): # hash cannot be cached using cache_it because infinite recurrence # occurs as hash is needed for setting cache dictionary keys h = self._mhash if h is None: a = self._assume_hashable_content() self._mhash = h = hash((self.__class__.__name__,) + self._hashable_content() + a) return h def _hashable_content(self): # If class defines additional attributes, like name in Symbol, # then this method should be updated accordingly to return # relevant attributes as tuple. return self._args @property def precedence(self): return 0 def tostr(self, level=0): return self.torepr() def torepr(self): l = [] for o in self: try: l.append(o.torepr()) except AttributeError: l.append(repr(o)) return self.__class__.__name__ + '(' + ', '.join(l) + ')' def __str__(self): return self.tostr() @staticmethod def set_repr_level(flag = None): """ Set the representation level used for repr() printing, returning the current level. The available levels are: 0: Lowest level printing. Expressions printing should be be able to be evaluated through Python's eval() function 1: Higher level printing. Expressions are printed in a one-dimensional fashion, are easier to read than level 1, but cannot be parsed through eval() 2: Highest level printing. Expressions are simply two-dimensional, "pretty" versions of the expressions that are only useful for readability purposes. Notes: ====== - Level 2 printing is done through the printing module in smpy.printing.pretty. 
""" return repr_level(flag) def __repr__(self): plevel = repr_level() if plevel == 1: return self.tostr() elif plevel == 2: from sympy.printing.pretty import pretty return pretty(self) return self.torepr() def __len__(self): return len(self._args) def __getitem__(self, iter): return self._args[iter] def __contains__(self, what): if self == what: return True for x in self._args: if what in x: return True return False @staticmethod def set_precision(prec = None): """ Set precision for Decimal number operations and return previous precision value. """ context = decimal.getcontext() oldprec = context.prec if prec is not None: context.prec = prec return oldprec def __nonzero__(self): # prevent using constructs like: # a = Symbol('a') # if a: .. raise AssertionError("only Equality|Unequality can define __nonzero__ method, %r" % (self.__class__)) def compare(self, other): """ Return -1,0,1 if the object is smaller, equal, or greater than other (not always in mathematical sense). If the object is of different type from other then their classes are ordered according to sorted_classes list. """ # all redefinitions of __cmp__ method should start with the # following three lines: if self is other: return 0 c = cmp(self.__class__, other.__class__) if c: return c # st = self._hashable_content() ot = other._hashable_content() c = cmp(len(st),len(ot)) if c: return c Basic = self.__class__.Basic for l,r in zip(st,ot): if isinstance(l, Basic): c = l.compare(r) else: c = cmp(l, r) if c: return c return 0
""" Implementation of Basic low-level methods. """ import decimal from assumptions import AssumeMeths # used for canonical ordering of symbolic sequences # via __cmp__ method: ordering_of_classes = [ # singleton numbers 'Zero', 'One','Half','Infinity','NaN','NegativeOne','NegativeInfinity', # numbers 'Integer','Rational','Real', # singleton symbols 'Exp1','Pi','ImaginaryUnit', # symbols 'Symbol','Wild','Temporary', # Functions that should come before Pow/Add/Mul 'ApplyConjugate', 'ApplyAbs', # arithmetic operations 'Pow', 'Mul', 'Add', # function values 'Apply', 'ApplyExp','ApplyLog', 'ApplySin','ApplyCos','ApplyTan','ApplyCot', 'ApplyASin','ApplyACos','ApplyATan','ApplyACot', 'ApplySinh','ApplyCosh','ApplyTanh','ApplyCoth', 'ApplyASinh','ApplyACosh','ApplyATanh','ApplyACoth', 'ApplyRisingFactorial','ApplyFallingFactorial', 'ApplyFactorial','ApplyBinomial', 'ApplyFloor', 'ApplyCeiling', 'ApplyRe','ApplyIm', 'ApplyArg', 'ApplySqrt','ApplySign', 'ApplyMrvLog', 'ApplyGamma','ApplyLowerGamma','ApplyUpperGamma','ApplyPolyGamma', 'ApplyErf', 'ApplyChebyshev','ApplyChebyshev2', 'Derivative','Integral', # defined singleton functions 'Abs','Sign','Sqrt', 'Floor', 'Ceiling', 'Re', 'Im', 'Arg', 'Conjugate', 'Exp','Log','MrvLog', 'Sin','Cos','Tan','Cot','ASin','ACos','ATan','ACot', 'Sinh','Cosh','Tanh','Coth','ASinh','ACosh','ATanh','ACoth', 'RisingFactorial','FallingFactorial', 'Factorial','Binomial', 'Gamma','LowerGamma','UpperGamma','PolyGamma', 'Erf', # special polynomials 'Chebyshev','Chebyshev2', # undefined functions 'Function','WildFunction', # anonymous functions 'Lambda', # operators 'FDerivative','FApply', # composition of functions 'FPow', 'Composition', # Landau O symbol 'Order', # relational operations 'Equality', 'Unequality', 'StrictInequality', 'Inequality', ] # def repr_level(flag=None, _cache=[1]): if flag is None: return _cache[0] old_flag = _cache[0] _cache[0] = max(0, min(2, int(flag))) # restrict to 0,1,2 return old_flag def mycopy(obj, level=0): if 
isinstance(obj, (list, tuple)): return obj.__class__(map(mycopy, obj)) elif isinstance(obj, dict): d = obj.__class__() for k,v in obj.items(): d[mycopy(k)] = mycopy(v) return d return obj def cache_it_fast(func): func._cache_it_cache = func_cache_it_cache = {} def wrapper(*args, **kw_args): if kw_args: keys = kw_args.keys() keys.sort() items = [(k+'=',kw_args[k]) for k in keys] k = args + tuple(items) else: k = args cache_flag = False try: r = func_cache_it_cache[k] except KeyError: r = func(*args, **kw_args) cache_flag = True if cache_flag: func_cache_it_cache[k] = r return mycopy(r) return wrapper def cache_it_immutable(func): func._cache_it_cache = func_cache_it_cache = {} def wrapper(*args, **kw_args): if kw_args: keys = kw_args.keys() keys.sort() items = [(k+'=',kw_args[k]) for k in keys] k = args + tuple(items) else: k = args try: return func_cache_it_cache[k] except KeyError: pass func_cache_it_cache[k] = r = func(*args, **kw_args) return r return wrapper def cache_it_debug(func): func._cache_it_cache = func_cache_it_cache = {} func._cache_it_cache_repr = func_cache_it_cache_repr = {} def wrapper(*args, **kw_args): if kw_args: keys = kw_args.keys() keys.sort() items = [(k+'=',kw_args[k]) for k in keys] k = args + tuple(items) else: k = args cache_flag = False try: r = func_cache_it_cache[k] except KeyError: r = func(*args, **kw_args) cache_flag = True if cache_flag: func_cache_it_cache[k] = r f = repr_level(0) func_cache_it_cache_repr[k] = repr(r) repr_level(f) else: s = func_cache_it_cache_repr[k] f = repr_level(0) new_s = repr(r) repr_level(f) # check that cache values have not changed assert new_s==s,`func,s,r, args[0].__class__` return mycopy(r) return wrapper cache_it = cache_it_fast #cache_it = cache_it_debug # twice slower def cache_it_nondummy(func): func._cache_it_cache = func_cache_it_cache = {} def wrapper(*args, **kw_args): if kw_args: try: dummy = kw_args['dummy'] except KeyError: dummy = None if dummy: return func(*args, **kw_args) keys = 
kw_args.keys() keys.sort() items = [(k+'=',kw_args[k]) for k in keys] k = args + tuple(items) else: k = args try: return func_cache_it_cache[k] except KeyError: pass func_cache_it_cache[k] = r = func(*args, **kw_args) return r return wrapper class BasicType(type): pass class MetaBasicMeths(BasicType): classnamespace = {} repr_level = 0 # defines the output of repr() singleton = {} def __init__(cls,*args,**kws): n = cls.__name__ c = MetaBasicMeths.classnamespace.get(n) if c is None: MetaBasicMeths.classnamespace[n] = cls else: print 'Ignoring redefinition of %s: %s defined earlier than %s' % (n, c, cls) type.__init__(cls, *args, **kws) # initialize default_assumptions dictionary default_assumptions = {} for k in dir(cls): if not k.startswith('is_'): continue v = getattr(cls, k) k = k[3:] if isinstance(v,(bool,int,long)): default_assumptions[k] = bool(v) cls.default_assumptions = default_assumptions def __getattr__(cls, name): try: return MetaBasicMeths.classnamespace[name] except KeyError: pass raise AttributeError("'%s' object has no attribute '%s'"% (cls.__name__, name)) def __cmp__(cls, other): n1 = cls.__name__ n2 = other.__name__ c = cmp(n1,n2) if not c: return 0 UNKNOWN = len(ordering_of_classes)+1 try: i1 = ordering_of_classes.index(n1) except ValueError: #print 'Add',n1,'to basic_methods.ordering_of_classes list' #return c i1 = UNKNOWN try: i2 = ordering_of_classes.index(n2) except ValueError: #print 'Add',n2,'to basic_methods.ordering_of_classes list' #return c i2 = UNKNOWN if i1 == UNKNOWN and i2 == UNKNOWN: return c return cmp(i1,i2) class BasicMeths(AssumeMeths): __metaclass__ = MetaBasicMeths Lambda_precedence = 1 Add_precedence = 40 Mul_precedence = 50 Pow_precedence = 60 Apply_precedence = 70 Item_precedence = 75 Atom_precedence = 1000 def __getattr__(self, name): try: return self._get_assumption(name) except AttributeError: pass if BasicMeths.classnamespace.has_key(name): return BasicMeths.classnamespace[name] else: raise AttributeError("'%s' object 
has no attribute '%s'"% (self.__class__.__name__, name)) def __setattr__(self, name, val): if name.startswith('is_'): raise AttributeError("Modification of assumptions is not allowed") else: AssumeMeths.__setattr__(self, name, val) def __hash__(self): # hash cannot be cached using cache_it because infinite recurrence # occurs as hash is needed for setting cache dictionary keys h = self._mhash if h is None: a = self._assume_hashable_content() self._mhash = h = hash((self.__class__.__name__,) + self._hashable_content() + a) return h def _hashable_content(self): # If class defines additional attributes, like name in Symbol, # then this method should be updated accordingly to return # relevant attributes as tuple. return self._args @property def precedence(self): return 0 def tostr(self, level=0): return self.torepr() def torepr(self): l = [] for o in self: try: l.append(o.torepr()) except AttributeError: l.append(repr(o)) return self.__class__.__name__ + '(' + ', '.join(l) + ')' def __str__(self): return self.tostr() @staticmethod def set_repr_level(flag = None): """ Set the representation level used for repr() printing, returning the current level. The available levels are: 0: Lowest level printing. Expressions printing should be be able to be evaluated through Python's eval() function 1: Higher level printing. Expressions are printed in a one-dimensional fashion, are easier to read than level 1, but cannot be parsed through eval() 2: Highest level printing. Expressions are simply two-dimensional, "pretty" versions of the expressions that are only useful for readability purposes. Notes: ====== - Level 2 printing is done through the printing module in smpy.printing.pretty. 
""" return repr_level(flag) def __repr__(self): plevel = repr_level() if plevel == 1: return self.tostr() elif plevel == 2: from sympy.printing.pretty import pretty return pretty(self) return self.torepr() def __len__(self): return len(self._args) def __getitem__(self, iter): return self._args[iter] def __contains__(self, what): if self == what: return True for x in self._args: if what in x: return True return False @staticmethod def set_precision(prec = None): """ Set precision for Decimal number operations and return previous precision value. """ context = decimal.getcontext() oldprec = context.prec if prec is not None: context.prec = prec return oldprec def __nonzero__(self): # prevent using constructs like: # a = Symbol('a') # if a: .. raise AssertionError("only Equality|Unequality can define __nonzero__ method, %r" % (self.__class__)) def compare(self, other): """ Return -1,0,1 if the object is smaller, equal, or greater than other (not always in mathematical sense). If the object is of different type from other then their classes are ordered according to sorted_classes list. """ # all redefinitions of __cmp__ method should start with the # following three lines: if self is other: return 0 c = cmp(self.__class__, other.__class__) if c: return c # st = self._hashable_content() ot = other._hashable_content() c = cmp(len(st),len(ot)) if c: return c Basic = self.__class__.Basic for l,r in zip(st,ot): if isinstance(l, Basic): c = l.compare(r) else: c = cmp(l, r) if c: return c return 0
en
0.795146
Implementation of Basic low-level methods. # used for canonical ordering of symbolic sequences # via __cmp__ method: # singleton numbers # numbers # singleton symbols # symbols # Functions that should come before Pow/Add/Mul # arithmetic operations # function values # defined singleton functions # special polynomials # undefined functions # anonymous functions # operators # composition of functions # Landau O symbol # relational operations # # restrict to 0,1,2 # check that cache values have not changed #cache_it = cache_it_debug # twice slower # defines the output of repr() # initialize default_assumptions dictionary #print 'Add',n1,'to basic_methods.ordering_of_classes list' #return c #print 'Add',n2,'to basic_methods.ordering_of_classes list' #return c # hash cannot be cached using cache_it because infinite recurrence # occurs as hash is needed for setting cache dictionary keys # If class defines additional attributes, like name in Symbol, # then this method should be updated accordingly to return # relevant attributes as tuple. Set the representation level used for repr() printing, returning the current level. The available levels are: 0: Lowest level printing. Expressions printing should be be able to be evaluated through Python's eval() function 1: Higher level printing. Expressions are printed in a one-dimensional fashion, are easier to read than level 1, but cannot be parsed through eval() 2: Highest level printing. Expressions are simply two-dimensional, "pretty" versions of the expressions that are only useful for readability purposes. Notes: ====== - Level 2 printing is done through the printing module in smpy.printing.pretty. Set precision for Decimal number operations and return previous precision value. # prevent using constructs like: # a = Symbol('a') # if a: .. Return -1,0,1 if the object is smaller, equal, or greater than other (not always in mathematical sense). 
If the object is of different type from other then their classes are ordered according to sorted_classes list. # all redefinitions of __cmp__ method should start with the # following three lines: #
2.550861
3
models/backbone.py
padeler/PE-former
15
6629807
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ Backbone modules. """ from collections import OrderedDict import torch import torch.nn.functional as F import torchvision from torch import nn from torchvision.models._utils import IntermediateLayerGetter from typing import Dict, List from util.misc import NestedTensor, is_main_process, load_pretrained_weights_vit from .position_encoding import build_position_encoding from models import vision_transformer import math class FrozenBatchNorm2d(torch.nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super(FrozenBatchNorm2d, self).__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super(FrozenBatchNorm2d, self)._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x): # move reshapes to the beginning # to make it fuser-friendly w = self.weight.reshape(1, -1, 1, 1) b = self.bias.reshape(1, -1, 1, 1) rv = self.running_var.reshape(1, -1, 1, 1) rm = self.running_mean.reshape(1, -1, 1, 1) eps = 1e-5 scale = w * (rv + eps).rsqrt() bias = b - rm * scale return x * scale + bias class BackboneBase(nn.Module): def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool): super().__init__() for name, parameter in backbone.named_parameters(): if not 
train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: parameter.requires_grad_(False) if return_interm_layers: return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} else: return_layers = {'layer4': "0"} self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) self.num_channels = num_channels # def forward(self, tensor_list: NestedTensor): # xs = self.body(tensor_list.tensors) # out: Dict[str, NestedTensor] = {} # for name, x in xs.items(): # m = tensor_list.mask # assert m is not None # mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] # out[name] = NestedTensor(x, mask) # return out def forward(self, tensor_list): xs = self.body(tensor_list) return xs class Backbone(BackboneBase): """ResNet backbone with frozen BatchNorm.""" def __init__(self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool): backbone = getattr(torchvision.models, name)( replace_stride_with_dilation=[False, False, dilation], pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) num_channels = 512 if name in ('resnet18', 'resnet34') else 2048 super().__init__(backbone, train_backbone, num_channels, return_interm_layers) class BackboneVit(nn.Module): """Vit backbone.""" def __init__(self, args): patch_size = args.patch_size # Initialize VIT if args.vit_arch != "resnet50": if "_tiny" in args.vit_arch: backbone = vision_transformer.deit_tiny(patch_size=patch_size, img_size=args.input_size, drop_rate=args.vit_dropout, attn_drop_rate=args.vit_dropout, drop_path_rate=args.vit_dropout) else: backbone = vision_transformer.deit_small(patch_size=patch_size, img_size=args.input_size, drop_rate=args.vit_dropout, attn_drop_rate=args.vit_dropout, drop_path_rate=args.vit_dropout) backbone.dim_patches = [backbone.patch_embed.img_size[0] // backbone.patch_embed.patch_size[0], backbone.patch_embed.img_size[1] // backbone.patch_embed.patch_size[1]] self.is_vit_backbone = True 
load_pretrained_weights_vit(backbone, args.vit_weights, checkpoint_key="teacher", model_name=args.vit_arch, patch_size=patch_size) elif args.vit_arch == "resnet50": from models.transformer_vit import create_resnet_encoder backbone = create_resnet_encoder(args) self.is_vit_backbone = False # XXX This is the output features size for # resnet50. Resnet50 Downsamples the input by 32 backbone.dim_patches = [args.input_size[0] // 32, args.input_size[1] // 32] super().__init__() num_patches = backbone.dim_patches[0] * backbone.dim_patches[1] # number of patches in the image # i.e the image has patches_dim x patches_dim self.num_patches = num_patches # self.patches_dim = int(num_patches ** 0.5) # Dont train the backbone ViT as a first test # for name, parameter in backbone.named_parameters(): # parameter.requires_grad_(False) # return_layers = {'layer4': "0"} self.body = backbone # number of ViT "channels" # depends on input res and number of boxes. # I.e 224x224 input res with 16x16 box size gives 14x14 outputs # 16X16 patch size for an RGB image (3channels) is 16*16*3 = 764 # Deit_small halfs this input to 384 and that is its embedding dim size # I concatenate the vector from CLS with each patch vector (of size args.vim_dim) # This gives a total number of channels 2 * args.vit_dim self.num_channels = args.vit_dim def forward(self, tensor_list): if self.is_vit_backbone: # vit version out = self.body.forward(tensor_list, cls_only=False) else: # resnet version out = self.body.forward(tensor_list) out = out.permute(0, 2, 1) # send channels last # Shape is B, N, C where N is patches+1 (i.e 14*14+1 for 16 patch size and 224 input) return out # convert to xs # XXX The following was part of early testing with concatenating CLS with # each tokern to increase the channels. # Turned out to make no positive difference # Keeping it here in comments for now. 
# cls_out = vit_out[:, 0:1, :] # vit_out = vit_out[:, 1:, :] # B, N, C = vit_out.shape # cls_out = cls_out.expand(-1, N, -1) # # cls_out # vit_out = torch.cat((cls_out, vit_out), 2) # C = 2 * C # number of channels after concat # D = self.patches_dim # xs = OrderedDict() # xs["0"] = vit_out.reshape(B, D, D, C).permute(0, 3, 1, 2) # return xs # class Joiner(nn.Sequential): # def __init__(self, backbone, position_embedding): # super().__init__(backbone, position_embedding) # def forward(self, tensor_list): # xs = self[0](tensor_list) # out = [] # pos = [] # for name, x in xs.items(): # out.append(x) # # position encoding # pos.append(self[1](x).to(x[0].dtype)) # return out, pos # def build_backbone(args): # position_embedding = build_position_encoding(args) # train_backbone = args.lr_backbone > 0 # return_interm_layers = args.masks # backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) # model = Joiner(backbone, position_embedding) # model.num_channels = backbone.num_channels # return model def build_backbone_vit(args): # position_embedding = build_position_encoding(args) backbone = BackboneVit(args) # backbone = Backbone("resnet50", True, False, False) # model = Joiner(backbone, position_embedding) # model.num_channels = backbone.num_channels return backbone
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ Backbone modules. """ from collections import OrderedDict import torch import torch.nn.functional as F import torchvision from torch import nn from torchvision.models._utils import IntermediateLayerGetter from typing import Dict, List from util.misc import NestedTensor, is_main_process, load_pretrained_weights_vit from .position_encoding import build_position_encoding from models import vision_transformer import math class FrozenBatchNorm2d(torch.nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super(FrozenBatchNorm2d, self).__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super(FrozenBatchNorm2d, self)._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x): # move reshapes to the beginning # to make it fuser-friendly w = self.weight.reshape(1, -1, 1, 1) b = self.bias.reshape(1, -1, 1, 1) rv = self.running_var.reshape(1, -1, 1, 1) rm = self.running_mean.reshape(1, -1, 1, 1) eps = 1e-5 scale = w * (rv + eps).rsqrt() bias = b - rm * scale return x * scale + bias class BackboneBase(nn.Module): def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool): super().__init__() for name, parameter in backbone.named_parameters(): if not 
train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: parameter.requires_grad_(False) if return_interm_layers: return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} else: return_layers = {'layer4': "0"} self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) self.num_channels = num_channels # def forward(self, tensor_list: NestedTensor): # xs = self.body(tensor_list.tensors) # out: Dict[str, NestedTensor] = {} # for name, x in xs.items(): # m = tensor_list.mask # assert m is not None # mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] # out[name] = NestedTensor(x, mask) # return out def forward(self, tensor_list): xs = self.body(tensor_list) return xs class Backbone(BackboneBase): """ResNet backbone with frozen BatchNorm.""" def __init__(self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool): backbone = getattr(torchvision.models, name)( replace_stride_with_dilation=[False, False, dilation], pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) num_channels = 512 if name in ('resnet18', 'resnet34') else 2048 super().__init__(backbone, train_backbone, num_channels, return_interm_layers) class BackboneVit(nn.Module): """Vit backbone.""" def __init__(self, args): patch_size = args.patch_size # Initialize VIT if args.vit_arch != "resnet50": if "_tiny" in args.vit_arch: backbone = vision_transformer.deit_tiny(patch_size=patch_size, img_size=args.input_size, drop_rate=args.vit_dropout, attn_drop_rate=args.vit_dropout, drop_path_rate=args.vit_dropout) else: backbone = vision_transformer.deit_small(patch_size=patch_size, img_size=args.input_size, drop_rate=args.vit_dropout, attn_drop_rate=args.vit_dropout, drop_path_rate=args.vit_dropout) backbone.dim_patches = [backbone.patch_embed.img_size[0] // backbone.patch_embed.patch_size[0], backbone.patch_embed.img_size[1] // backbone.patch_embed.patch_size[1]] self.is_vit_backbone = True 
load_pretrained_weights_vit(backbone, args.vit_weights, checkpoint_key="teacher", model_name=args.vit_arch, patch_size=patch_size) elif args.vit_arch == "resnet50": from models.transformer_vit import create_resnet_encoder backbone = create_resnet_encoder(args) self.is_vit_backbone = False # XXX This is the output features size for # resnet50. Resnet50 Downsamples the input by 32 backbone.dim_patches = [args.input_size[0] // 32, args.input_size[1] // 32] super().__init__() num_patches = backbone.dim_patches[0] * backbone.dim_patches[1] # number of patches in the image # i.e the image has patches_dim x patches_dim self.num_patches = num_patches # self.patches_dim = int(num_patches ** 0.5) # Dont train the backbone ViT as a first test # for name, parameter in backbone.named_parameters(): # parameter.requires_grad_(False) # return_layers = {'layer4': "0"} self.body = backbone # number of ViT "channels" # depends on input res and number of boxes. # I.e 224x224 input res with 16x16 box size gives 14x14 outputs # 16X16 patch size for an RGB image (3channels) is 16*16*3 = 764 # Deit_small halfs this input to 384 and that is its embedding dim size # I concatenate the vector from CLS with each patch vector (of size args.vim_dim) # This gives a total number of channels 2 * args.vit_dim self.num_channels = args.vit_dim def forward(self, tensor_list): if self.is_vit_backbone: # vit version out = self.body.forward(tensor_list, cls_only=False) else: # resnet version out = self.body.forward(tensor_list) out = out.permute(0, 2, 1) # send channels last # Shape is B, N, C where N is patches+1 (i.e 14*14+1 for 16 patch size and 224 input) return out # convert to xs # XXX The following was part of early testing with concatenating CLS with # each tokern to increase the channels. # Turned out to make no positive difference # Keeping it here in comments for now. 
# cls_out = vit_out[:, 0:1, :] # vit_out = vit_out[:, 1:, :] # B, N, C = vit_out.shape # cls_out = cls_out.expand(-1, N, -1) # # cls_out # vit_out = torch.cat((cls_out, vit_out), 2) # C = 2 * C # number of channels after concat # D = self.patches_dim # xs = OrderedDict() # xs["0"] = vit_out.reshape(B, D, D, C).permute(0, 3, 1, 2) # return xs # class Joiner(nn.Sequential): # def __init__(self, backbone, position_embedding): # super().__init__(backbone, position_embedding) # def forward(self, tensor_list): # xs = self[0](tensor_list) # out = [] # pos = [] # for name, x in xs.items(): # out.append(x) # # position encoding # pos.append(self[1](x).to(x[0].dtype)) # return out, pos # def build_backbone(args): # position_embedding = build_position_encoding(args) # train_backbone = args.lr_backbone > 0 # return_interm_layers = args.masks # backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) # model = Joiner(backbone, position_embedding) # model.num_channels = backbone.num_channels # return model def build_backbone_vit(args): # position_embedding = build_position_encoding(args) backbone = BackboneVit(args) # backbone = Backbone("resnet50", True, False, False) # model = Joiner(backbone, position_embedding) # model.num_channels = backbone.num_channels return backbone
en
0.725205
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved Backbone modules. BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. # move reshapes to the beginning # to make it fuser-friendly # def forward(self, tensor_list: NestedTensor): # xs = self.body(tensor_list.tensors) # out: Dict[str, NestedTensor] = {} # for name, x in xs.items(): # m = tensor_list.mask # assert m is not None # mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] # out[name] = NestedTensor(x, mask) # return out ResNet backbone with frozen BatchNorm. Vit backbone. # Initialize VIT # XXX This is the output features size for # resnet50. Resnet50 Downsamples the input by 32 # number of patches in the image # i.e the image has patches_dim x patches_dim # self.patches_dim = int(num_patches ** 0.5) # Dont train the backbone ViT as a first test # for name, parameter in backbone.named_parameters(): # parameter.requires_grad_(False) # return_layers = {'layer4': "0"} # number of ViT "channels" # depends on input res and number of boxes. # I.e 224x224 input res with 16x16 box size gives 14x14 outputs # 16X16 patch size for an RGB image (3channels) is 16*16*3 = 764 # Deit_small halfs this input to 384 and that is its embedding dim size # I concatenate the vector from CLS with each patch vector (of size args.vim_dim) # This gives a total number of channels 2 * args.vit_dim # vit version # resnet version # send channels last # Shape is B, N, C where N is patches+1 (i.e 14*14+1 for 16 patch size and 224 input) # convert to xs # XXX The following was part of early testing with concatenating CLS with # each tokern to increase the channels. # Turned out to make no positive difference # Keeping it here in comments for now. 
# cls_out = vit_out[:, 0:1, :] # vit_out = vit_out[:, 1:, :] # B, N, C = vit_out.shape # cls_out = cls_out.expand(-1, N, -1) # # cls_out # vit_out = torch.cat((cls_out, vit_out), 2) # C = 2 * C # number of channels after concat # D = self.patches_dim # xs = OrderedDict() # xs["0"] = vit_out.reshape(B, D, D, C).permute(0, 3, 1, 2) # return xs # class Joiner(nn.Sequential): # def __init__(self, backbone, position_embedding): # super().__init__(backbone, position_embedding) # def forward(self, tensor_list): # xs = self[0](tensor_list) # out = [] # pos = [] # for name, x in xs.items(): # out.append(x) # # position encoding # pos.append(self[1](x).to(x[0].dtype)) # return out, pos # def build_backbone(args): # position_embedding = build_position_encoding(args) # train_backbone = args.lr_backbone > 0 # return_interm_layers = args.masks # backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) # model = Joiner(backbone, position_embedding) # model.num_channels = backbone.num_channels # return model # position_embedding = build_position_encoding(args) # backbone = Backbone("resnet50", True, False, False) # model = Joiner(backbone, position_embedding) # model.num_channels = backbone.num_channels
2.014183
2
imagekit/management/commands/generateimages.py
jiinus/django-imagekit
0
6629808
import re from django.core.management.base import BaseCommand from ...exceptions import MissingSource from ...registry import cachefile_registry, generator_registry class Command(BaseCommand): help = ("""Generate files for the specified image generators (or all of them if none was provided). Simple, glob-like wildcards are allowed, with * matching all characters within a segment, and ** matching across segments. (Segments are separated with colons.) So, for example, "a:*:c" will match "a:b:c", but not "a:b:x:c", whereas "a:**:c" will match both. Subsegments are always matched, so "a" will match "a" as well as "a:b" and "a:b:c".""") args = '[generator_ids]' def add_arguments(self, parser): parser.add_argument('generator_id', nargs='*', help='<app_name>:<model>:<field> for model specs') def handle(self, *args, **options): generators = generator_registry.get_ids() generator_ids = options['generator_id'] if 'generator_id' in options else args if generator_ids: patterns = self.compile_patterns(generator_ids) generators = (id for id in generators if any(p.match(id) for p in patterns)) for generator_id in generators: self.stdout.write('Validating generator: %s\n' % generator_id) for image_file in cachefile_registry.get(generator_id): if image_file.name: self.stdout.write(' %s\n' % image_file.name) try: image_file.generate() except MissingSource as err: self.stdout.write('\t No source associated with\n') except Exception as err: self.stdout.write('\tFailed %s\n' % (err)) def compile_patterns(self, generator_ids): return [self.compile_pattern(id) for id in generator_ids] def compile_pattern(self, generator_id): parts = re.split(r'(\*{1,2})', generator_id) pattern = '' for part in parts: if part == '*': pattern += '[^:]*' elif part == '**': pattern += '.*' else: pattern += re.escape(part) return re.compile('^%s(:.*)?$' % pattern)
import re from django.core.management.base import BaseCommand from ...exceptions import MissingSource from ...registry import cachefile_registry, generator_registry class Command(BaseCommand): help = ("""Generate files for the specified image generators (or all of them if none was provided). Simple, glob-like wildcards are allowed, with * matching all characters within a segment, and ** matching across segments. (Segments are separated with colons.) So, for example, "a:*:c" will match "a:b:c", but not "a:b:x:c", whereas "a:**:c" will match both. Subsegments are always matched, so "a" will match "a" as well as "a:b" and "a:b:c".""") args = '[generator_ids]' def add_arguments(self, parser): parser.add_argument('generator_id', nargs='*', help='<app_name>:<model>:<field> for model specs') def handle(self, *args, **options): generators = generator_registry.get_ids() generator_ids = options['generator_id'] if 'generator_id' in options else args if generator_ids: patterns = self.compile_patterns(generator_ids) generators = (id for id in generators if any(p.match(id) for p in patterns)) for generator_id in generators: self.stdout.write('Validating generator: %s\n' % generator_id) for image_file in cachefile_registry.get(generator_id): if image_file.name: self.stdout.write(' %s\n' % image_file.name) try: image_file.generate() except MissingSource as err: self.stdout.write('\t No source associated with\n') except Exception as err: self.stdout.write('\tFailed %s\n' % (err)) def compile_patterns(self, generator_ids): return [self.compile_pattern(id) for id in generator_ids] def compile_pattern(self, generator_id): parts = re.split(r'(\*{1,2})', generator_id) pattern = '' for part in parts: if part == '*': pattern += '[^:]*' elif part == '**': pattern += '.*' else: pattern += re.escape(part) return re.compile('^%s(:.*)?$' % pattern)
en
0.921761
Generate files for the specified image generators (or all of them if none was provided). Simple, glob-like wildcards are allowed, with * matching all characters within a segment, and ** matching across segments. (Segments are separated with colons.) So, for example, "a:*:c" will match "a:b:c", but not "a:b:x:c", whereas "a:**:c" will match both. Subsegments are always matched, so "a" will match "a" as well as "a:b" and "a:b:c".
2.29702
2
cumulusci/robotframework/locator_manager.py
edmondo1984/CumulusCI
1
6629809
from robot.libraries.BuiltIn import BuiltIn import functools from cumulusci.core.utils import dictmerge """ This module supports managing multiple location strategies. It works like this: 0. Locators are stored in a global LOCATORS dictionary. The keys are the locator prefixes, the values are dictionaries containing the locators 1. Libraries can register a dictionary of locators with a prefix (eg: register_locators("npsp", {...})). These get added to LOCATORS 2. Open Test Browser calls selenium's add_location_strategy for each registered set of locators. (Note: Location strategies cannot be added before a browser is open). 3. Keywords can use dot notation to refer to locators. A colon separates the prefix from the locator, and a locator from a comma-separated string of arguments Example: from a keyword library: | from cumulusci.robotframework.locator_manager import register_locators | | register_locators("example", {"dialog": {"button": "//button[text()='{}']"}}) in a test: | page should contain element example:dialog.button:Save ... will result in "//button[text()='Save']" being passed to 'page should contain element' """ LOCATORS = {} def register_locators(prefix, locators): """Register a strategy with a set of locators or a keyword If the prefix is already known, merge in the new locators. """ if prefix in LOCATORS: BuiltIn().log(f"merging keywords for prefix {prefix}", "DEBUG") dictmerge(LOCATORS[prefix], locators) else: BuiltIn().log(f"registering keywords for prefix {prefix}", "DEBUG") LOCATORS[prefix] = locators def add_location_strategies(): """Call selenium's add_location_strategy keyword for all strategies""" # selenium throws an error if the location strategy already # exists, so we use a flag to make sure this code is called # only once. 
selenium = BuiltIn().get_library_instance("SeleniumLibrary") for (prefix, strategy) in LOCATORS.items(): try: BuiltIn().log(f"adding location strategy for '{prefix}'", "DEBUG") if isinstance(strategy, dict): selenium.add_location_strategy( prefix, functools.partial(locate_element, prefix) ) else: # not a dict? Just pass it through to selenium as-is # so that this function can register normal keywords selenium.add_location_strategy(prefix, strategy) except Exception as e: BuiltIn().log(f"unable to register locators: {e}", "DEBUG") def locate_element(prefix, parent, locator, tag, constraints): """This is the function called by SeleniumLibrary when a custom locator strategy is used (eg: cci:foo.bar). We pass an additional argument, prefix, so we know which set of locators to use. This tokenizes the locator and then does a lookup in the dictionary associated with the given prefix. If any arguments are present, they are applied with .format() before being used to find an element. """ # Ideally we should call get_webelements (plural) and filter # the results based on the tag and constraints arguments, but # the documentation on those arguments is virtually nil and # SeleniumLibrary's filter mechanism is a private function. In # practice it probably won't matter <shrug>. selenium = BuiltIn().get_library_instance("SeleniumLibrary") loc = translate_locator(prefix, locator) BuiltIn().log(f"locate_element: '{prefix}:{locator}' => {loc}", "DEBUG") try: element = selenium.get_webelement(loc) except Exception: raise Exception( f"Element with locator '{prefix}:{locator}' not found\ntranslated: '{loc}'" ) return element def translate_locator(prefix, locator): """Return the translated locator This uses the passed-in prefix and locator to find the proper element in the LOCATORS dictionary, and then formats it with any arguments that were part of the locator. 
""" if ":" in locator: (path, argstring) = locator.split(":", 1) else: path = locator argstring = "" loc = LOCATORS[prefix] breadcrumbs = [] try: for key in path.split("."): breadcrumbs.append(key) loc = loc[key.strip()] except KeyError: breadcrumb_path = ".".join(breadcrumbs) raise KeyError(f"locator {prefix}:{breadcrumb_path} not found") if not isinstance(loc, str): raise TypeError(f"Expected locator to be of type string, but was {type(loc)}") try: # args is still a string, so split on "," # This means that arguments can't have commas in them, but I'm not sure # that will be a problem. If we find a case where it's a problem we can # do more sophisticated parsing. args = [arg.strip() for arg in argstring.split(",")] if argstring else [] loc = loc.format(*args) except IndexError: raise Exception("Not enough arguments were supplied") return loc
from robot.libraries.BuiltIn import BuiltIn import functools from cumulusci.core.utils import dictmerge """ This module supports managing multiple location strategies. It works like this: 0. Locators are stored in a global LOCATORS dictionary. The keys are the locator prefixes, the values are dictionaries containing the locators 1. Libraries can register a dictionary of locators with a prefix (eg: register_locators("npsp", {...})). These get added to LOCATORS 2. Open Test Browser calls selenium's add_location_strategy for each registered set of locators. (Note: Location strategies cannot be added before a browser is open). 3. Keywords can use dot notation to refer to locators. A colon separates the prefix from the locator, and a locator from a comma-separated string of arguments Example: from a keyword library: | from cumulusci.robotframework.locator_manager import register_locators | | register_locators("example", {"dialog": {"button": "//button[text()='{}']"}}) in a test: | page should contain element example:dialog.button:Save ... will result in "//button[text()='Save']" being passed to 'page should contain element' """ LOCATORS = {} def register_locators(prefix, locators): """Register a strategy with a set of locators or a keyword If the prefix is already known, merge in the new locators. """ if prefix in LOCATORS: BuiltIn().log(f"merging keywords for prefix {prefix}", "DEBUG") dictmerge(LOCATORS[prefix], locators) else: BuiltIn().log(f"registering keywords for prefix {prefix}", "DEBUG") LOCATORS[prefix] = locators def add_location_strategies(): """Call selenium's add_location_strategy keyword for all strategies""" # selenium throws an error if the location strategy already # exists, so we use a flag to make sure this code is called # only once. 
selenium = BuiltIn().get_library_instance("SeleniumLibrary") for (prefix, strategy) in LOCATORS.items(): try: BuiltIn().log(f"adding location strategy for '{prefix}'", "DEBUG") if isinstance(strategy, dict): selenium.add_location_strategy( prefix, functools.partial(locate_element, prefix) ) else: # not a dict? Just pass it through to selenium as-is # so that this function can register normal keywords selenium.add_location_strategy(prefix, strategy) except Exception as e: BuiltIn().log(f"unable to register locators: {e}", "DEBUG") def locate_element(prefix, parent, locator, tag, constraints): """This is the function called by SeleniumLibrary when a custom locator strategy is used (eg: cci:foo.bar). We pass an additional argument, prefix, so we know which set of locators to use. This tokenizes the locator and then does a lookup in the dictionary associated with the given prefix. If any arguments are present, they are applied with .format() before being used to find an element. """ # Ideally we should call get_webelements (plural) and filter # the results based on the tag and constraints arguments, but # the documentation on those arguments is virtually nil and # SeleniumLibrary's filter mechanism is a private function. In # practice it probably won't matter <shrug>. selenium = BuiltIn().get_library_instance("SeleniumLibrary") loc = translate_locator(prefix, locator) BuiltIn().log(f"locate_element: '{prefix}:{locator}' => {loc}", "DEBUG") try: element = selenium.get_webelement(loc) except Exception: raise Exception( f"Element with locator '{prefix}:{locator}' not found\ntranslated: '{loc}'" ) return element def translate_locator(prefix, locator): """Return the translated locator This uses the passed-in prefix and locator to find the proper element in the LOCATORS dictionary, and then formats it with any arguments that were part of the locator. 
""" if ":" in locator: (path, argstring) = locator.split(":", 1) else: path = locator argstring = "" loc = LOCATORS[prefix] breadcrumbs = [] try: for key in path.split("."): breadcrumbs.append(key) loc = loc[key.strip()] except KeyError: breadcrumb_path = ".".join(breadcrumbs) raise KeyError(f"locator {prefix}:{breadcrumb_path} not found") if not isinstance(loc, str): raise TypeError(f"Expected locator to be of type string, but was {type(loc)}") try: # args is still a string, so split on "," # This means that arguments can't have commas in them, but I'm not sure # that will be a problem. If we find a case where it's a problem we can # do more sophisticated parsing. args = [arg.strip() for arg in argstring.split(",")] if argstring else [] loc = loc.format(*args) except IndexError: raise Exception("Not enough arguments were supplied") return loc
en
0.82195
This module supports managing multiple location strategies. It works like this: 0. Locators are stored in a global LOCATORS dictionary. The keys are the locator prefixes, the values are dictionaries containing the locators 1. Libraries can register a dictionary of locators with a prefix (eg: register_locators("npsp", {...})). These get added to LOCATORS 2. Open Test Browser calls selenium's add_location_strategy for each registered set of locators. (Note: Location strategies cannot be added before a browser is open). 3. Keywords can use dot notation to refer to locators. A colon separates the prefix from the locator, and a locator from a comma-separated string of arguments Example: from a keyword library: | from cumulusci.robotframework.locator_manager import register_locators | | register_locators("example", {"dialog": {"button": "//button[text()='{}']"}}) in a test: | page should contain element example:dialog.button:Save ... will result in "//button[text()='Save']" being passed to 'page should contain element' Register a strategy with a set of locators or a keyword If the prefix is already known, merge in the new locators. Call selenium's add_location_strategy keyword for all strategies # selenium throws an error if the location strategy already # exists, so we use a flag to make sure this code is called # only once. # not a dict? Just pass it through to selenium as-is # so that this function can register normal keywords This is the function called by SeleniumLibrary when a custom locator strategy is used (eg: cci:foo.bar). We pass an additional argument, prefix, so we know which set of locators to use. This tokenizes the locator and then does a lookup in the dictionary associated with the given prefix. If any arguments are present, they are applied with .format() before being used to find an element. 
# Ideally we should call get_webelements (plural) and filter # the results based on the tag and constraints arguments, but # the documentation on those arguments is virtually nil and # SeleniumLibrary's filter mechanism is a private function. In # practice it probably won't matter <shrug>. Return the translated locator This uses the passed-in prefix and locator to find the proper element in the LOCATORS dictionary, and then formats it with any arguments that were part of the locator. # args is still a string, so split on "," # This means that arguments can't have commas in them, but I'm not sure # that will be a problem. If we find a case where it's a problem we can # do more sophisticated parsing.
3.135866
3
pahelix/utils/splitters.py
WorldEditors/PaddleHelix
0
6629810
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ splitters """ import random import numpy as np from itertools import compress from rdkit.Chem.Scaffolds import MurckoScaffold from collections import defaultdict from sklearn.model_selection import StratifiedKFold __all__ = [ 'RandomSplitter', 'IndexSplitter', 'ScaffoldSplitter', 'RandomScaffoldSplitter', ] def generate_scaffold(smiles, include_chirality=False): """ Obtain Bemis-Murcko scaffold from smiles Args: smiles: include_chirality: Return: scaffold: the scaffold of the given smiles. """ scaffold = MurckoScaffold.MurckoScaffoldSmiles( smiles=smiles, includeChirality=include_chirality) return scaffold class Splitter(object): """ The abstract class of splitters which split up dataset into train/valid/test subsets. """ def __init__(self): super(Splitter, self).__init__() class RandomSplitter(Splitter): """ Random splitter. """ def __init__(self): super(RandomSplitter, self).__init__() def split(self, dataset, frac_train=None, frac_valid=None, frac_test=None, seed=None): """ Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. seed(int|None): the random seed. 
""" np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0) N = len(dataset) indices = list(range(N)) rng = np.random.RandomState(seed) rng.shuffle(indices) train_cutoff = int(frac_train * N) valid_cutoff = int((frac_train + frac_valid) * N) train_dataset = dataset[indices[:train_cutoff]] valid_dataset = dataset[indices[train_cutoff:valid_cutoff]] test_dataset = dataset[indices[valid_cutoff:]] return train_dataset, valid_dataset, test_dataset class IndexSplitter(Splitter): """ Split daatasets that has already been orderd. The first `frac_train` proportion is used for train set, the next `frac_valid` for valid set and the final `frac_test` for test set. """ def __init__(self): super(IndexSplitter, self).__init__() def split(self, dataset, frac_train=None, frac_valid=None, frac_test=None): """ Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. """ np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0) N = len(dataset) indices = list(range(N)) train_cutoff = int(frac_train * N) valid_cutoff = int((frac_train + frac_valid) * N) train_dataset = dataset[indices[:train_cutoff]] valid_dataset = dataset[indices[train_cutoff:valid_cutoff]] test_dataset = dataset[indices[valid_cutoff:]] return train_dataset, valid_dataset, test_dataset class ScaffoldSplitter(Splitter): """ Adapted from https://github.com/deepchem/deepchem/blob/master/deepchem/splits/splitters.py Split dataset by Bemis-Murcko scaffolds """ def __init__(self): super(ScaffoldSplitter, self).__init__() def split(self, dataset, frac_train=None, frac_valid=None, frac_test=None): """ Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. 
frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. """ np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0) N = len(dataset) # create dict of the form {scaffold_i: [idx1, idx....]} all_scaffolds = {} for i in range(N): scaffold = generate_scaffold(dataset[i]['smiles'], include_chirality=True) if scaffold not in all_scaffolds: all_scaffolds[scaffold] = [i] else: all_scaffolds[scaffold].append(i) # sort from largest to smallest sets all_scaffolds = {key: sorted(value) for key, value in all_scaffolds.items()} all_scaffold_sets = [ scaffold_set for (scaffold, scaffold_set) in sorted( all_scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True) ] # get train, valid test indices train_cutoff = frac_train * N valid_cutoff = (frac_train + frac_valid) * N train_idx, valid_idx, test_idx = [], [], [] for scaffold_set in all_scaffold_sets: if len(train_idx) + len(scaffold_set) > train_cutoff: if len(train_idx) + len(valid_idx) + len(scaffold_set) > valid_cutoff: test_idx.extend(scaffold_set) else: valid_idx.extend(scaffold_set) else: train_idx.extend(scaffold_set) assert len(set(train_idx).intersection(set(valid_idx))) == 0 assert len(set(test_idx).intersection(set(valid_idx))) == 0 # get train, valid test indices train_cutoff = frac_train * N valid_cutoff = (frac_train + frac_valid) * N train_idx, valid_idx, test_idx = [], [], [] for scaffold_set in all_scaffold_sets: if len(train_idx) + len(scaffold_set) > train_cutoff: if len(train_idx) + len(valid_idx) + len(scaffold_set) > valid_cutoff: test_idx.extend(scaffold_set) else: valid_idx.extend(scaffold_set) else: train_idx.extend(scaffold_set) assert len(set(train_idx).intersection(set(valid_idx))) == 0 assert len(set(test_idx).intersection(set(valid_idx))) == 0 train_dataset = dataset[train_idx] valid_dataset = dataset[valid_idx] test_dataset = dataset[test_idx] return train_dataset, valid_dataset, 
test_dataset class RandomScaffoldSplitter(Splitter): """ Adapted from https://github.com/pfnet-research/chainer-chemistry/blob/master/chainer_chemistry/dataset/splitters/scaffold_splitter.py Split dataset by Bemis-Murcko scaffolds """ def __init__(self): super(RandomScaffoldSplitter, self).__init__() def split(self, dataset, frac_train=None, frac_valid=None, frac_test=None, seed=None): """ Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. seed(int|None): the random seed. """ np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0) N = len(dataset) rng = np.random.RandomState(seed) scaffolds = defaultdict(list) for ind in range(N): scaffold = generate_scaffold(dataset[ind]['smiles'], include_chirality=True) scaffolds[scaffold].append(ind) scaffold_sets = rng.permutation(list(scaffolds.values())) n_total_valid = int(np.floor(frac_valid * len(dataset))) n_total_test = int(np.floor(frac_test * len(dataset))) train_idx = [] valid_idx = [] test_idx = [] for scaffold_set in scaffold_sets: if len(valid_idx) + len(scaffold_set) <= n_total_valid: valid_idx.extend(scaffold_set) elif len(test_idx) + len(scaffold_set) <= n_total_test: test_idx.extend(scaffold_set) else: train_idx.extend(scaffold_set) train_dataset = dataset[train_idx] valid_dataset = dataset[valid_idx] test_dataset = dataset[test_idx] return train_dataset, valid_dataset, test_dataset
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ splitters """ import random import numpy as np from itertools import compress from rdkit.Chem.Scaffolds import MurckoScaffold from collections import defaultdict from sklearn.model_selection import StratifiedKFold __all__ = [ 'RandomSplitter', 'IndexSplitter', 'ScaffoldSplitter', 'RandomScaffoldSplitter', ] def generate_scaffold(smiles, include_chirality=False): """ Obtain Bemis-Murcko scaffold from smiles Args: smiles: include_chirality: Return: scaffold: the scaffold of the given smiles. """ scaffold = MurckoScaffold.MurckoScaffoldSmiles( smiles=smiles, includeChirality=include_chirality) return scaffold class Splitter(object): """ The abstract class of splitters which split up dataset into train/valid/test subsets. """ def __init__(self): super(Splitter, self).__init__() class RandomSplitter(Splitter): """ Random splitter. """ def __init__(self): super(RandomSplitter, self).__init__() def split(self, dataset, frac_train=None, frac_valid=None, frac_test=None, seed=None): """ Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. seed(int|None): the random seed. 
""" np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0) N = len(dataset) indices = list(range(N)) rng = np.random.RandomState(seed) rng.shuffle(indices) train_cutoff = int(frac_train * N) valid_cutoff = int((frac_train + frac_valid) * N) train_dataset = dataset[indices[:train_cutoff]] valid_dataset = dataset[indices[train_cutoff:valid_cutoff]] test_dataset = dataset[indices[valid_cutoff:]] return train_dataset, valid_dataset, test_dataset class IndexSplitter(Splitter): """ Split daatasets that has already been orderd. The first `frac_train` proportion is used for train set, the next `frac_valid` for valid set and the final `frac_test` for test set. """ def __init__(self): super(IndexSplitter, self).__init__() def split(self, dataset, frac_train=None, frac_valid=None, frac_test=None): """ Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. """ np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0) N = len(dataset) indices = list(range(N)) train_cutoff = int(frac_train * N) valid_cutoff = int((frac_train + frac_valid) * N) train_dataset = dataset[indices[:train_cutoff]] valid_dataset = dataset[indices[train_cutoff:valid_cutoff]] test_dataset = dataset[indices[valid_cutoff:]] return train_dataset, valid_dataset, test_dataset class ScaffoldSplitter(Splitter): """ Adapted from https://github.com/deepchem/deepchem/blob/master/deepchem/splits/splitters.py Split dataset by Bemis-Murcko scaffolds """ def __init__(self): super(ScaffoldSplitter, self).__init__() def split(self, dataset, frac_train=None, frac_valid=None, frac_test=None): """ Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. 
frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. """ np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0) N = len(dataset) # create dict of the form {scaffold_i: [idx1, idx....]} all_scaffolds = {} for i in range(N): scaffold = generate_scaffold(dataset[i]['smiles'], include_chirality=True) if scaffold not in all_scaffolds: all_scaffolds[scaffold] = [i] else: all_scaffolds[scaffold].append(i) # sort from largest to smallest sets all_scaffolds = {key: sorted(value) for key, value in all_scaffolds.items()} all_scaffold_sets = [ scaffold_set for (scaffold, scaffold_set) in sorted( all_scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True) ] # get train, valid test indices train_cutoff = frac_train * N valid_cutoff = (frac_train + frac_valid) * N train_idx, valid_idx, test_idx = [], [], [] for scaffold_set in all_scaffold_sets: if len(train_idx) + len(scaffold_set) > train_cutoff: if len(train_idx) + len(valid_idx) + len(scaffold_set) > valid_cutoff: test_idx.extend(scaffold_set) else: valid_idx.extend(scaffold_set) else: train_idx.extend(scaffold_set) assert len(set(train_idx).intersection(set(valid_idx))) == 0 assert len(set(test_idx).intersection(set(valid_idx))) == 0 # get train, valid test indices train_cutoff = frac_train * N valid_cutoff = (frac_train + frac_valid) * N train_idx, valid_idx, test_idx = [], [], [] for scaffold_set in all_scaffold_sets: if len(train_idx) + len(scaffold_set) > train_cutoff: if len(train_idx) + len(valid_idx) + len(scaffold_set) > valid_cutoff: test_idx.extend(scaffold_set) else: valid_idx.extend(scaffold_set) else: train_idx.extend(scaffold_set) assert len(set(train_idx).intersection(set(valid_idx))) == 0 assert len(set(test_idx).intersection(set(valid_idx))) == 0 train_dataset = dataset[train_idx] valid_dataset = dataset[valid_idx] test_dataset = dataset[test_idx] return train_dataset, valid_dataset, 
test_dataset class RandomScaffoldSplitter(Splitter): """ Adapted from https://github.com/pfnet-research/chainer-chemistry/blob/master/chainer_chemistry/dataset/splitters/scaffold_splitter.py Split dataset by Bemis-Murcko scaffolds """ def __init__(self): super(RandomScaffoldSplitter, self).__init__() def split(self, dataset, frac_train=None, frac_valid=None, frac_test=None, seed=None): """ Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. seed(int|None): the random seed. """ np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.0) N = len(dataset) rng = np.random.RandomState(seed) scaffolds = defaultdict(list) for ind in range(N): scaffold = generate_scaffold(dataset[ind]['smiles'], include_chirality=True) scaffolds[scaffold].append(ind) scaffold_sets = rng.permutation(list(scaffolds.values())) n_total_valid = int(np.floor(frac_valid * len(dataset))) n_total_test = int(np.floor(frac_test * len(dataset))) train_idx = [] valid_idx = [] test_idx = [] for scaffold_set in scaffold_sets: if len(valid_idx) + len(scaffold_set) <= n_total_valid: valid_idx.extend(scaffold_set) elif len(test_idx) + len(scaffold_set) <= n_total_test: test_idx.extend(scaffold_set) else: train_idx.extend(scaffold_set) train_dataset = dataset[train_idx] valid_dataset = dataset[valid_idx] test_dataset = dataset[test_idx] return train_dataset, valid_dataset, test_dataset
en
0.687837
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. splitters Obtain Bemis-Murcko scaffold from smiles Args: smiles: include_chirality: Return: scaffold: the scaffold of the given smiles. The abstract class of splitters which split up dataset into train/valid/test subsets. Random splitter. Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. seed(int|None): the random seed. Split daatasets that has already been orderd. The first `frac_train` proportion is used for train set, the next `frac_valid` for valid set and the final `frac_test` for test set. Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. Adapted from https://github.com/deepchem/deepchem/blob/master/deepchem/splits/splitters.py Split dataset by Bemis-Murcko scaffolds Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. 
# create dict of the form {scaffold_i: [idx1, idx....]} # sort from largest to smallest sets # get train, valid test indices # get train, valid test indices Adapted from https://github.com/pfnet-research/chainer-chemistry/blob/master/chainer_chemistry/dataset/splitters/scaffold_splitter.py Split dataset by Bemis-Murcko scaffolds Args: dataset(InMemoryDataset): the dataset to split. frac_train(float): the fraction of data to be used for the train split. frac_valid(float): the fraction of data to be used for the valid split. frac_test(float): the fraction of data to be used for the test split. seed(int|None): the random seed.
2.399199
2
streambox/test/tweet.py
chenzongxiong/streambox
3
6629811
all_tests = [ { "name" : "test-tweet-fast", "exec" : "./test-tweet.bin", "records" : 1000 * 1000, # records per epoch "record_size" : 200, "target_ms" : 1000, "input_file" : "/ssd/twitter_download/filtered_tweets.txt", # --- optional --- # # "cores" : 54, # if unspecified, fall back to app default "tput_baseline" : 5000, # used to be compared with the test results "tput_hint" : 4000, # the throughput value that test should try first # --- control --- # # "disable" : True # XXX skip the test }, ]
all_tests = [ { "name" : "test-tweet-fast", "exec" : "./test-tweet.bin", "records" : 1000 * 1000, # records per epoch "record_size" : 200, "target_ms" : 1000, "input_file" : "/ssd/twitter_download/filtered_tweets.txt", # --- optional --- # # "cores" : 54, # if unspecified, fall back to app default "tput_baseline" : 5000, # used to be compared with the test results "tput_hint" : 4000, # the throughput value that test should try first # --- control --- # # "disable" : True # XXX skip the test }, ]
en
0.746804
# records per epoch # --- optional --- # # "cores" : 54, # if unspecified, fall back to app default # used to be compared with the test results # the throughput value that test should try first # --- control --- # # "disable" : True # XXX skip the test
1.3836
1
lxmlx/test/test_xml_writer.py
innodatalabs/lxmlx
5
6629812
import unittest from lxmlx.xml_writer import XmlWriter import io import lxml.etree as et from lxmlx.event import scan class XmlWriterHelper(XmlWriter): def __init__(self, xml_declaration=False): self.__io = io.BytesIO() XmlWriter.__init__(self, self.__io, xml_declaration=xml_declaration) @property def data(self): return self.__io.getvalue() class TestXmlWriter(unittest.TestCase): def _test_roundtrip(self, text, model=None, nsmap=None): xml = et.fromstring(text) if nsmap is None: nsmap = xml.nsmap w = XmlWriterHelper() w.write_events(scan(xml), nsmap=nsmap) if model is None: model = text if w.data != model: print(model) print(et.tostring(xml)) print(w.data) self.fail('Not equal') def test01(self): w = XmlWriterHelper() w.write_enter('root') w.write_exit('root') model = b'<root/>' if model != w.data: print(model) print(w.data) self.fail('Not equal') def test02(self): w = XmlWriterHelper(xml_declaration=True) w.write_enter('root') w.write_exit('root') model = b"<?xml version='1.0' encoding='utf-8'?>\n<root/>" if model != w.data: print(model) print(w.data) self.fail('Not equal') def test03(self): self._test_roundtrip(b'<root/>') self._test_roundtrip(b'<root>Hello</root>') self._test_roundtrip(b'<root>Hello&amp;</root>') self._test_roundtrip(b'<root>H&#65;ello&amp;</root>', model=b'<root>HAello&amp;</root>') def test04(self): self._test_roundtrip(b'<root lang="en"/>') self._test_roundtrip(b'<root text="hello&apos;&#10;here"/>', model=b'<root text="hello\'&#10;here"/>') def test05(self): self._test_roundtrip(b'<root><!-- this is a comment --></root>') def test06(self): self._test_roundtrip(b'<root><?pi1 this is a pi?></root>') def test07(self): self._test_roundtrip(b'<root xmlns:a="ns-a"/>') def test08(self): self._test_roundtrip(b'<root xmlns:a="ns-a"><child xmlns:b="ns-a"/></root>') def test08(self): self._test_roundtrip(b'<root xmlns:a="ns-a"><a:child a:lang="en"/></root>') if __name__ == '__main__': unittest.main()
import unittest from lxmlx.xml_writer import XmlWriter import io import lxml.etree as et from lxmlx.event import scan class XmlWriterHelper(XmlWriter): def __init__(self, xml_declaration=False): self.__io = io.BytesIO() XmlWriter.__init__(self, self.__io, xml_declaration=xml_declaration) @property def data(self): return self.__io.getvalue() class TestXmlWriter(unittest.TestCase): def _test_roundtrip(self, text, model=None, nsmap=None): xml = et.fromstring(text) if nsmap is None: nsmap = xml.nsmap w = XmlWriterHelper() w.write_events(scan(xml), nsmap=nsmap) if model is None: model = text if w.data != model: print(model) print(et.tostring(xml)) print(w.data) self.fail('Not equal') def test01(self): w = XmlWriterHelper() w.write_enter('root') w.write_exit('root') model = b'<root/>' if model != w.data: print(model) print(w.data) self.fail('Not equal') def test02(self): w = XmlWriterHelper(xml_declaration=True) w.write_enter('root') w.write_exit('root') model = b"<?xml version='1.0' encoding='utf-8'?>\n<root/>" if model != w.data: print(model) print(w.data) self.fail('Not equal') def test03(self): self._test_roundtrip(b'<root/>') self._test_roundtrip(b'<root>Hello</root>') self._test_roundtrip(b'<root>Hello&amp;</root>') self._test_roundtrip(b'<root>H&#65;ello&amp;</root>', model=b'<root>HAello&amp;</root>') def test04(self): self._test_roundtrip(b'<root lang="en"/>') self._test_roundtrip(b'<root text="hello&apos;&#10;here"/>', model=b'<root text="hello\'&#10;here"/>') def test05(self): self._test_roundtrip(b'<root><!-- this is a comment --></root>') def test06(self): self._test_roundtrip(b'<root><?pi1 this is a pi?></root>') def test07(self): self._test_roundtrip(b'<root xmlns:a="ns-a"/>') def test08(self): self._test_roundtrip(b'<root xmlns:a="ns-a"><child xmlns:b="ns-a"/></root>') def test08(self): self._test_roundtrip(b'<root xmlns:a="ns-a"><a:child a:lang="en"/></root>') if __name__ == '__main__': unittest.main()
en
0.125846
#65;ello&amp;</root>', model=b'<root>HAello&amp;</root>') #10;here"/>', model=b'<root text="hello\'&#10;here"/>')
2.772478
3
grtr/utils.py
msamogh/GRTr
0
6629813
<filename>grtr/utils.py # Copyright (c) 2019-present, HuggingFace Inc. # All rights reserved. This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import copy import logging import os import tarfile import tempfile from collections import deque, defaultdict, Counter from heapq import heappop, heappush, heappushpop from itertools import chain from zipfile import ZipFile import numpy as np import torch import torch.nn.functional as F from pytorch_transformers import cached_path, GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME from mldc.data.schema import DataSpec from mldc.preprocessing.stream import stream_dlgs_many from torch.utils.data import DataLoader, TensorDataset from grtr.env_utils import OUTPUT_DIR PERSONACHAT_URL = "https://s3.amazonaws.com/datasets.huggingface.co/personachat/personachat_self_original.json" HF_FINETUNED_MODEL = "https://s3.amazonaws.com/models.huggingface.co/transfer-learning-chatbot/finetuned_chatbot_gpt.tar.gz" SPECIAL_TOKENS = { 'bos_token': '<bos>', 'eos_token': '<eos>', 'pad_token': '<pad>', 'additional_special_tokens': ['<speaker1>', '<speaker2>'] } MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids"] PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids"] logger = logging.getLogger(__file__) class MetaLWozDataset(torch.utils.data.Dataset): def __init__(self, dialogues, force_min_size=0): def valid_dlg(dlg): if dlg['domain'] == 'catsstandingup': return False for turn in dlg['turns']: if len(turn) == 0: print("Found invalid dialogue.") return False return True dataset = [(dlg, dlg['domain'] + "--" + dlg['task_id']) for dlg in dialogues if valid_dlg(dlg)] if force_min_size: cnt = Counter([d[1] for d in dataset]) dataset = [(dlg, domain) for dlg, domain in dataset if cnt[domain] >= force_min_size] self.items = dataset @staticmethod def from_dataspec(tokenizer, zipfile_path, dataspec_path, fold, args, dataset_cache, 
force_min_size=0): with open(dataspec_path) as dataspec_in: dataspec = DataSpec.load(dataspec_in) for fold_name, fold_paths, fold_tasks in zip(['train', 'valid', 'test'], dataspec.unpack_paths(), dataspec.unpack_tasks()): if fold_name != fold: continue dialogues = load_metalwoz_dialogues(tokenizer, zipfile_path, args, filenames=fold_paths, tasks=fold_tasks, dataset_cache=dataset_cache) return MetaLWozDataset(dialogues, force_min_size=force_min_size) @staticmethod def from_testspec_entry(tokenizer, zipfile_path, testspec_item, args, dataset_cache, filenames=None): domain_dialogues = load_metalwoz_dialogues(tokenizer, zipfile_path, args, filenames=filenames, dataset_cache=dataset_cache) support_dialogues = [dlg for dlg in domain_dialogues if dlg['id'] in testspec_item['support_dlgs']] return MetaLWozDataset(support_dialogues) def __len__(self): return len(self.items) def __getitem__(self, key): return self.items[key] def download_pretrained_model(): """ Download and extract finetuned model from S3 """ resolved_archive_file = cached_path(HF_FINETUNED_MODEL) tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format(resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) return tempdir def load_metalwoz_dialogues(tokenizer, zipfile_path, args, filenames=None, tasks=None, dataset_cache=None): if dataset_cache and os.path.isfile(dataset_cache): logger.info("Load tokenized dataset from cache at %s", dataset_cache) dataset = torch.load(dataset_cache) else: logger.info("Tokenize and encode the dataset") if filenames is None: filenames = [entry for entry in ZipFile(zipfile_path).namelist() if entry.startswith('dialogues/') and entry.endswith('.txt')] dataset = [] tasks = set(tasks if tasks else []) for dialogue in stream_dlgs_many(zipfile_path, filenames): if tasks and dialogue.task_id not in tasks: continue turns_tokenized = [tokenizer.encode(' 
'.join(turn.split()[:args.max_utterance_length])) for turn in dialogue.turns] dialogue_json = {'id': dialogue.id, 'domain': dialogue.domain, 'task_id': dialogue.task_id, 'user_id': dialogue.user_id, 'bot_id': dialogue.bot_id, 'turns': turns_tokenized} dataset.append(dialogue_json) if dataset_cache: torch.save(dataset, dataset_cache) return dataset def get_metalwoz_dataset(tokenizer, zipfile_path, dataspec_path, dataset_cache=None): with open(dataspec_path) as dataspec_in: dataspec = DataSpec.load(dataspec_in) dataset = {} for fold_name, fold_paths, fold_tasks in zip(['train', 'valid', 'test'], dataspec.unpack_paths(), dataspec.unpack_tasks()): dataset[fold_name] = load_metalwoz_dialogues(tokenizer, zipfile_path, None, filenames=fold_paths, tasks=fold_tasks, dataset_cache=dataset_cache) return dataset def populate_candidates_cache(in_dialogues, max_len): all_turns = [] for dialogue in in_dialogues: if isinstance(dialogue, tuple): dialogue = dialogue[0] # metadataset instances are (x, domain) all_turns += dialogue['turns'] cache_idx = np.random.choice(list(range(len(all_turns))), size=max_len) result = deque([], maxlen=max_len) for idx in cache_idx: result.append(all_turns[idx]) return result def build_input_from_segments(history, reply, tokenizer, lm_labels=False, with_eos=True): """ Build a sequence of input from 3 segments: persona, history and last reply """ speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS['additional_special_tokens']) bos, eos = tokenizer.bos_token_id, tokenizer.eos_token_id instance = {} sequence = [[bos]] + history + [reply + ([eos] if with_eos else [])] sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])] instance["input_ids"] = list(chain(*sequence)) instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] instance["mc_token_ids"] = len(instance["input_ids"]) - 1 instance["lm_labels"] = [-1] * 
len(instance["input_ids"]) if lm_labels: instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] return instance, sequence def slice_dialogue_into_gpt2_input(dialogue, max_history, shared_candidates_cache, num_candidates): history = deque([], maxlen=max_history * 2 - 1) result = [] for idx in range(1, len(dialogue), 2): history.append(dialogue[idx - 1]) utterance = dialogue[idx] # candiates are: {num_candidates - 1} distractors + gold response candidates = [] while len(candidates) < num_candidates - 1: candidate = np.random.choice(shared_candidates_cache) if candidate == utterance: continue candidates.append(candidate) candidates.append(utterance) shared_candidates_cache.append(utterance) result.append((list(history), candidates)) history.append(utterance) return result def pad_dataset(dataset, padding=0): """ Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """ max_l = max(len(x) for x in dataset["input_ids"]) for name in PADDED_INPUTS: dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]] return dataset def pad_batch(batch, num_candidates, padding=0): """ Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. 
""" max_l = max(len(cand) for item in batch for cand in item['input_ids']) padded = dict() for name in PADDED_INPUTS: padded[name] = np.full((len(batch), num_candidates, max_l), padding if name != 'lm_labels' else -1) for batch_idx, item in enumerate(batch): assert len(item[name]) == num_candidates for cand_idx, cand in enumerate(item[name]): padded[name][batch_idx, cand_idx, :len(cand)] = cand tensorized_batch = [] for input_name in MODEL_INPUTS: if input_name in padded: tensor = torch.tensor(padded[input_name]) else: tensor = torch.tensor([x[input_name] for x in batch]) tensorized_batch.append(tensor) return tensorized_batch def average_distributed_scalar(scalar, args): """ Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """ if args.local_rank == -1: return scalar scalar_t = torch.tensor(scalar, dtype=torch.float, device=args.device) / torch.distributed.get_world_size() torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM) return scalar_t.item() def get_loader_for_dataset(dataset, tokenizer, max_history, num_candidates, batch_size, dataloader_num_workers=0, distributed=False, max_samples=0): candidates_cache = populate_candidates_cache(dataset, max_len=1000) logger.info("Build inputs and labels") featurized_dataset = defaultdict(list) for dialog_json in dataset: dialogue = dialog_json['turns'] if len(dialogue) < 2: continue for history, candidates in slice_dialogue_into_gpt2_input(dialogue, max_history, candidates_cache, num_candidates): for j, candidate in enumerate(candidates): lm_labels = bool(j == num_candidates - 1) instance, _ = build_input_from_segments(history, candidate, tokenizer, lm_labels) for input_name, input_array in instance.items(): featurized_dataset[input_name].append(input_array) featurized_dataset["mc_labels"].append(num_candidates - 1) featurized_dataset["n_candidates"] = num_candidates logger.info("Pad inputs and convert to Tensor") tensorized_dataset = [] 
dataset_padded = pad_dataset(featurized_dataset, padding=tokenizer.pad_token_id) for input_name in MODEL_INPUTS: tensor = torch.tensor(dataset_padded[input_name]) if input_name != "mc_labels": tensor = tensor.view((-1, featurized_dataset["n_candidates"]) + tensor.shape[1:]) tensorized_dataset.append(tensor) if 0 < max_samples: # shuffling and trimming to max_samples shuffle_index = list(range(len(featurized_dataset['mc_labels']))) np.random.shuffle(shuffle_index) shuffle_index = shuffle_index[:max_samples] tensorized_dataset_trimmed = [] for tensor_i in tensorized_dataset: tensorized_dataset_trimmed.append(tensor_i[shuffle_index, ...]) tensorized_dataset = tensorized_dataset_trimmed logger.info("Build dataloader") tensor_dataset = TensorDataset(*tensorized_dataset) sampler = torch.utils.data.distributed.DistributedSampler(tensor_dataset) if distributed else None loader = DataLoader(tensor_dataset, sampler=sampler, batch_size=batch_size, num_workers=dataloader_num_workers, shuffle=(not distributed), drop_last=True) return loader, sampler def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')): """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering Args: logits: logits distribution shape (vocabulary size) top_k: <=0: no filtering, >0: keep only top k tokens with highest probability. top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset whose total probability mass is greater than or equal to the threshold top_p. In practice, we select the highest probability tokens whose cumulative probability mass exceeds the threshold top_p. 
threshold: a minimal threshold to keep logits """ assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code top_k = min(top_k, logits.size(-1)) if top_k > 0: # Remove all tokens with a probability less than the last token in the top-k tokens indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p > 0.0: # Compute cumulative probabilities of sorted tokens sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probabilities > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # Back to unsorted indices and set them to -infinity indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value indices_to_remove = logits < threshold logits[indices_to_remove] = filter_value return logits def sample_sequence(history, tokenizer, model, args, current_output=None): special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS['additional_special_tokens']) + [ tokenizer.eos_token_id, tokenizer.bos_token_id + tokenizer.pad_token_id ] if current_output is None: current_output = [] for i in range(args.max_length): instance, sequence = build_input_from_segments(history, current_output, tokenizer, with_eos=False) input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0) token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0) logits = model(input_ids, token_type_ids=token_type_ids)[0] logits = logits[0, -1, :] / args.temperature logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p) probs = 
F.softmax(logits, dim=-1) prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1) if i < args.min_length and prev.item() in special_tokens_ids: for _ in range(10): prev = torch.multinomial(probs, num_samples=1) if prev.item() not in special_tokens_ids: break if prev.item() in special_tokens_ids: break current_output.append(prev.item()) return current_output def generate_and_rank(support_contexts, support_responses, target_context, tokenizer, model, encoder, args, ret_winner=False): generated = sample_sequence(target_context, tokenizer, model, args) winner = 'generated' if not len(support_contexts): return (generated, winner) if ret_winner else generated target_context_emb = embed_dialogue(target_context, [], tokenizer, encoder, args) num_ret_candidates = args.num_candidates - 1 candidates_heap = [] for idx, support_context in enumerate(support_contexts): support_context_emb = embed_dialogue(support_context, None, tokenizer, encoder, args) distance = target_context_emb.dist(support_context_emb) if len(candidates_heap) < num_ret_candidates: heappush(candidates_heap, (-distance, idx)) else: heappushpop(candidates_heap, (-distance, idx)) if len(candidates_heap) == 0: return (generated, winner) if ret_winner else generated ret_instances = [] candidates = [] while len(ret_instances) != num_ret_candidates: if len(candidates_heap) == 0: ret_instances.append(copy.deepcopy(ret_instances[-1])) candidates.append(copy.deepcopy(candidates[-1])) continue _, idx = heappop(candidates_heap) ret_instance, _ = build_input_from_segments(target_context, support_responses[idx], tokenizer, with_eos=True) ret_instances.append(ret_instance) candidates.append(support_responses[idx]) gen_instance, _ = build_input_from_segments(target_context, generated, tokenizer, with_eos=True) candidates.append(generated) data_point = defaultdict(list) for instance in ret_instances + [gen_instance]: for key, value in instance.items(): data_point[key].append(value) 
data_point["mc_labels"].append(args.num_candidates - 1) data_point["num_candidates"] = args.num_candidates tensorized_data_point = [] dataset_padded = pad_dataset(data_point, padding=tokenizer.pad_token_id) for input_name in MODEL_INPUTS: tensor = torch.tensor(dataset_padded[input_name]) if input_name != "mc_labels": tensor = tensor.view((-1, data_point["num_candidates"]) + tensor.shape[1:]) if args.device == 'cuda': tensor = tensor.cuda() tensorized_data_point.append(tensor) model_output = model(*tensorized_data_point) mc_labels = model_output[3] arg_max = mc_labels.max(-1)[1] if arg_max != args.num_candidates - 1: winner = 'retrieved' logger.info(f"{winner} response won") return (candidates[arg_max], winner) if ret_winner else candidates[arg_max] def embed_dialogue(context, response, tokenizer, encoder, args): if response is None: response = [] instance, sequence = build_input_from_segments(context, response, tokenizer, with_eos=True) input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0) token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0) cls_index = torch.tensor(instance["mc_token_ids"], device=args.device).unsqueeze(0) emb = encoder(input_ids, token_type_ids=token_type_ids)[0] cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (emb.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states emb = emb.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) return emb def get_blank_training_state(): return {'epoch': 0, 'step': 0, 'best_loss': np.inf, 'steps_without_improvement': 0} def load_tokenizer_and_model(in_args): tokenizer_class = GPT2Tokenizer model_class = GPT2DoubleHeadsModel checkpoint_full_path = os.path.join(OUTPUT_DIR, in_args.model_checkpoint) weights_full_path = os.path.join(checkpoint_full_path, WEIGHTS_NAME) checkpoint_to_load = checkpoint_full_path if 
os.path.exists(weights_full_path) else in_args.model_name tokenizer = tokenizer_class.from_pretrained(checkpoint_to_load) model = model_class.from_pretrained(checkpoint_to_load) return tokenizer, model def save_model(in_model, in_dst_file): torch.save(in_model.state_dict(), in_dst_file) def load_model(in_model, in_src_file): in_model.load_state_dict(torch.load(in_src_file))
<filename>grtr/utils.py # Copyright (c) 2019-present, HuggingFace Inc. # All rights reserved. This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import copy import logging import os import tarfile import tempfile from collections import deque, defaultdict, Counter from heapq import heappop, heappush, heappushpop from itertools import chain from zipfile import ZipFile import numpy as np import torch import torch.nn.functional as F from pytorch_transformers import cached_path, GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME from mldc.data.schema import DataSpec from mldc.preprocessing.stream import stream_dlgs_many from torch.utils.data import DataLoader, TensorDataset from grtr.env_utils import OUTPUT_DIR PERSONACHAT_URL = "https://s3.amazonaws.com/datasets.huggingface.co/personachat/personachat_self_original.json" HF_FINETUNED_MODEL = "https://s3.amazonaws.com/models.huggingface.co/transfer-learning-chatbot/finetuned_chatbot_gpt.tar.gz" SPECIAL_TOKENS = { 'bos_token': '<bos>', 'eos_token': '<eos>', 'pad_token': '<pad>', 'additional_special_tokens': ['<speaker1>', '<speaker2>'] } MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids"] PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids"] logger = logging.getLogger(__file__) class MetaLWozDataset(torch.utils.data.Dataset): def __init__(self, dialogues, force_min_size=0): def valid_dlg(dlg): if dlg['domain'] == 'catsstandingup': return False for turn in dlg['turns']: if len(turn) == 0: print("Found invalid dialogue.") return False return True dataset = [(dlg, dlg['domain'] + "--" + dlg['task_id']) for dlg in dialogues if valid_dlg(dlg)] if force_min_size: cnt = Counter([d[1] for d in dataset]) dataset = [(dlg, domain) for dlg, domain in dataset if cnt[domain] >= force_min_size] self.items = dataset @staticmethod def from_dataspec(tokenizer, zipfile_path, dataspec_path, fold, args, dataset_cache, 
force_min_size=0): with open(dataspec_path) as dataspec_in: dataspec = DataSpec.load(dataspec_in) for fold_name, fold_paths, fold_tasks in zip(['train', 'valid', 'test'], dataspec.unpack_paths(), dataspec.unpack_tasks()): if fold_name != fold: continue dialogues = load_metalwoz_dialogues(tokenizer, zipfile_path, args, filenames=fold_paths, tasks=fold_tasks, dataset_cache=dataset_cache) return MetaLWozDataset(dialogues, force_min_size=force_min_size) @staticmethod def from_testspec_entry(tokenizer, zipfile_path, testspec_item, args, dataset_cache, filenames=None): domain_dialogues = load_metalwoz_dialogues(tokenizer, zipfile_path, args, filenames=filenames, dataset_cache=dataset_cache) support_dialogues = [dlg for dlg in domain_dialogues if dlg['id'] in testspec_item['support_dlgs']] return MetaLWozDataset(support_dialogues) def __len__(self): return len(self.items) def __getitem__(self, key): return self.items[key] def download_pretrained_model(): """ Download and extract finetuned model from S3 """ resolved_archive_file = cached_path(HF_FINETUNED_MODEL) tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format(resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) return tempdir def load_metalwoz_dialogues(tokenizer, zipfile_path, args, filenames=None, tasks=None, dataset_cache=None): if dataset_cache and os.path.isfile(dataset_cache): logger.info("Load tokenized dataset from cache at %s", dataset_cache) dataset = torch.load(dataset_cache) else: logger.info("Tokenize and encode the dataset") if filenames is None: filenames = [entry for entry in ZipFile(zipfile_path).namelist() if entry.startswith('dialogues/') and entry.endswith('.txt')] dataset = [] tasks = set(tasks if tasks else []) for dialogue in stream_dlgs_many(zipfile_path, filenames): if tasks and dialogue.task_id not in tasks: continue turns_tokenized = [tokenizer.encode(' 
'.join(turn.split()[:args.max_utterance_length])) for turn in dialogue.turns] dialogue_json = {'id': dialogue.id, 'domain': dialogue.domain, 'task_id': dialogue.task_id, 'user_id': dialogue.user_id, 'bot_id': dialogue.bot_id, 'turns': turns_tokenized} dataset.append(dialogue_json) if dataset_cache: torch.save(dataset, dataset_cache) return dataset def get_metalwoz_dataset(tokenizer, zipfile_path, dataspec_path, dataset_cache=None): with open(dataspec_path) as dataspec_in: dataspec = DataSpec.load(dataspec_in) dataset = {} for fold_name, fold_paths, fold_tasks in zip(['train', 'valid', 'test'], dataspec.unpack_paths(), dataspec.unpack_tasks()): dataset[fold_name] = load_metalwoz_dialogues(tokenizer, zipfile_path, None, filenames=fold_paths, tasks=fold_tasks, dataset_cache=dataset_cache) return dataset def populate_candidates_cache(in_dialogues, max_len): all_turns = [] for dialogue in in_dialogues: if isinstance(dialogue, tuple): dialogue = dialogue[0] # metadataset instances are (x, domain) all_turns += dialogue['turns'] cache_idx = np.random.choice(list(range(len(all_turns))), size=max_len) result = deque([], maxlen=max_len) for idx in cache_idx: result.append(all_turns[idx]) return result def build_input_from_segments(history, reply, tokenizer, lm_labels=False, with_eos=True): """ Build a sequence of input from 3 segments: persona, history and last reply """ speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS['additional_special_tokens']) bos, eos = tokenizer.bos_token_id, tokenizer.eos_token_id instance = {} sequence = [[bos]] + history + [reply + ([eos] if with_eos else [])] sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])] instance["input_ids"] = list(chain(*sequence)) instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] instance["mc_token_ids"] = len(instance["input_ids"]) - 1 instance["lm_labels"] = [-1] * 
len(instance["input_ids"]) if lm_labels: instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] return instance, sequence def slice_dialogue_into_gpt2_input(dialogue, max_history, shared_candidates_cache, num_candidates): history = deque([], maxlen=max_history * 2 - 1) result = [] for idx in range(1, len(dialogue), 2): history.append(dialogue[idx - 1]) utterance = dialogue[idx] # candiates are: {num_candidates - 1} distractors + gold response candidates = [] while len(candidates) < num_candidates - 1: candidate = np.random.choice(shared_candidates_cache) if candidate == utterance: continue candidates.append(candidate) candidates.append(utterance) shared_candidates_cache.append(utterance) result.append((list(history), candidates)) history.append(utterance) return result def pad_dataset(dataset, padding=0): """ Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """ max_l = max(len(x) for x in dataset["input_ids"]) for name in PADDED_INPUTS: dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]] return dataset def pad_batch(batch, num_candidates, padding=0): """ Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. 
""" max_l = max(len(cand) for item in batch for cand in item['input_ids']) padded = dict() for name in PADDED_INPUTS: padded[name] = np.full((len(batch), num_candidates, max_l), padding if name != 'lm_labels' else -1) for batch_idx, item in enumerate(batch): assert len(item[name]) == num_candidates for cand_idx, cand in enumerate(item[name]): padded[name][batch_idx, cand_idx, :len(cand)] = cand tensorized_batch = [] for input_name in MODEL_INPUTS: if input_name in padded: tensor = torch.tensor(padded[input_name]) else: tensor = torch.tensor([x[input_name] for x in batch]) tensorized_batch.append(tensor) return tensorized_batch def average_distributed_scalar(scalar, args): """ Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """ if args.local_rank == -1: return scalar scalar_t = torch.tensor(scalar, dtype=torch.float, device=args.device) / torch.distributed.get_world_size() torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM) return scalar_t.item() def get_loader_for_dataset(dataset, tokenizer, max_history, num_candidates, batch_size, dataloader_num_workers=0, distributed=False, max_samples=0): candidates_cache = populate_candidates_cache(dataset, max_len=1000) logger.info("Build inputs and labels") featurized_dataset = defaultdict(list) for dialog_json in dataset: dialogue = dialog_json['turns'] if len(dialogue) < 2: continue for history, candidates in slice_dialogue_into_gpt2_input(dialogue, max_history, candidates_cache, num_candidates): for j, candidate in enumerate(candidates): lm_labels = bool(j == num_candidates - 1) instance, _ = build_input_from_segments(history, candidate, tokenizer, lm_labels) for input_name, input_array in instance.items(): featurized_dataset[input_name].append(input_array) featurized_dataset["mc_labels"].append(num_candidates - 1) featurized_dataset["n_candidates"] = num_candidates logger.info("Pad inputs and convert to Tensor") tensorized_dataset = [] 
dataset_padded = pad_dataset(featurized_dataset, padding=tokenizer.pad_token_id) for input_name in MODEL_INPUTS: tensor = torch.tensor(dataset_padded[input_name]) if input_name != "mc_labels": tensor = tensor.view((-1, featurized_dataset["n_candidates"]) + tensor.shape[1:]) tensorized_dataset.append(tensor) if 0 < max_samples: # shuffling and trimming to max_samples shuffle_index = list(range(len(featurized_dataset['mc_labels']))) np.random.shuffle(shuffle_index) shuffle_index = shuffle_index[:max_samples] tensorized_dataset_trimmed = [] for tensor_i in tensorized_dataset: tensorized_dataset_trimmed.append(tensor_i[shuffle_index, ...]) tensorized_dataset = tensorized_dataset_trimmed logger.info("Build dataloader") tensor_dataset = TensorDataset(*tensorized_dataset) sampler = torch.utils.data.distributed.DistributedSampler(tensor_dataset) if distributed else None loader = DataLoader(tensor_dataset, sampler=sampler, batch_size=batch_size, num_workers=dataloader_num_workers, shuffle=(not distributed), drop_last=True) return loader, sampler def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')): """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering Args: logits: logits distribution shape (vocabulary size) top_k: <=0: no filtering, >0: keep only top k tokens with highest probability. top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset whose total probability mass is greater than or equal to the threshold top_p. In practice, we select the highest probability tokens whose cumulative probability mass exceeds the threshold top_p. 
threshold: a minimal threshold to keep logits """ assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code top_k = min(top_k, logits.size(-1)) if top_k > 0: # Remove all tokens with a probability less than the last token in the top-k tokens indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p > 0.0: # Compute cumulative probabilities of sorted tokens sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probabilities > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # Back to unsorted indices and set them to -infinity indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value indices_to_remove = logits < threshold logits[indices_to_remove] = filter_value return logits def sample_sequence(history, tokenizer, model, args, current_output=None): special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS['additional_special_tokens']) + [ tokenizer.eos_token_id, tokenizer.bos_token_id + tokenizer.pad_token_id ] if current_output is None: current_output = [] for i in range(args.max_length): instance, sequence = build_input_from_segments(history, current_output, tokenizer, with_eos=False) input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0) token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0) logits = model(input_ids, token_type_ids=token_type_ids)[0] logits = logits[0, -1, :] / args.temperature logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p) probs = 
F.softmax(logits, dim=-1) prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1) if i < args.min_length and prev.item() in special_tokens_ids: for _ in range(10): prev = torch.multinomial(probs, num_samples=1) if prev.item() not in special_tokens_ids: break if prev.item() in special_tokens_ids: break current_output.append(prev.item()) return current_output def generate_and_rank(support_contexts, support_responses, target_context, tokenizer, model, encoder, args, ret_winner=False): generated = sample_sequence(target_context, tokenizer, model, args) winner = 'generated' if not len(support_contexts): return (generated, winner) if ret_winner else generated target_context_emb = embed_dialogue(target_context, [], tokenizer, encoder, args) num_ret_candidates = args.num_candidates - 1 candidates_heap = [] for idx, support_context in enumerate(support_contexts): support_context_emb = embed_dialogue(support_context, None, tokenizer, encoder, args) distance = target_context_emb.dist(support_context_emb) if len(candidates_heap) < num_ret_candidates: heappush(candidates_heap, (-distance, idx)) else: heappushpop(candidates_heap, (-distance, idx)) if len(candidates_heap) == 0: return (generated, winner) if ret_winner else generated ret_instances = [] candidates = [] while len(ret_instances) != num_ret_candidates: if len(candidates_heap) == 0: ret_instances.append(copy.deepcopy(ret_instances[-1])) candidates.append(copy.deepcopy(candidates[-1])) continue _, idx = heappop(candidates_heap) ret_instance, _ = build_input_from_segments(target_context, support_responses[idx], tokenizer, with_eos=True) ret_instances.append(ret_instance) candidates.append(support_responses[idx]) gen_instance, _ = build_input_from_segments(target_context, generated, tokenizer, with_eos=True) candidates.append(generated) data_point = defaultdict(list) for instance in ret_instances + [gen_instance]: for key, value in instance.items(): data_point[key].append(value) 
data_point["mc_labels"].append(args.num_candidates - 1) data_point["num_candidates"] = args.num_candidates tensorized_data_point = [] dataset_padded = pad_dataset(data_point, padding=tokenizer.pad_token_id) for input_name in MODEL_INPUTS: tensor = torch.tensor(dataset_padded[input_name]) if input_name != "mc_labels": tensor = tensor.view((-1, data_point["num_candidates"]) + tensor.shape[1:]) if args.device == 'cuda': tensor = tensor.cuda() tensorized_data_point.append(tensor) model_output = model(*tensorized_data_point) mc_labels = model_output[3] arg_max = mc_labels.max(-1)[1] if arg_max != args.num_candidates - 1: winner = 'retrieved' logger.info(f"{winner} response won") return (candidates[arg_max], winner) if ret_winner else candidates[arg_max] def embed_dialogue(context, response, tokenizer, encoder, args): if response is None: response = [] instance, sequence = build_input_from_segments(context, response, tokenizer, with_eos=True) input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0) token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0) cls_index = torch.tensor(instance["mc_token_ids"], device=args.device).unsqueeze(0) emb = encoder(input_ids, token_type_ids=token_type_ids)[0] cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (emb.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states emb = emb.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) return emb def get_blank_training_state(): return {'epoch': 0, 'step': 0, 'best_loss': np.inf, 'steps_without_improvement': 0} def load_tokenizer_and_model(in_args): tokenizer_class = GPT2Tokenizer model_class = GPT2DoubleHeadsModel checkpoint_full_path = os.path.join(OUTPUT_DIR, in_args.model_checkpoint) weights_full_path = os.path.join(checkpoint_full_path, WEIGHTS_NAME) checkpoint_to_load = checkpoint_full_path if 
os.path.exists(weights_full_path) else in_args.model_name tokenizer = tokenizer_class.from_pretrained(checkpoint_to_load) model = model_class.from_pretrained(checkpoint_to_load) return tokenizer, model def save_model(in_model, in_dst_file): torch.save(in_model.state_dict(), in_dst_file) def load_model(in_model, in_src_file): in_model.load_state_dict(torch.load(in_src_file))
en
0.84782
# Copyright (c) 2019-present, HuggingFace Inc. # All rights reserved. This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. Download and extract finetuned model from S3 # metadataset instances are (x, domain) Build a sequence of input from 3 segments: persona, history and last reply # candiates are: {num_candidates - 1} distractors + gold response Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. # shuffling and trimming to max_samples Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering Args: logits: logits distribution shape (vocabulary size) top_k: <=0: no filtering, >0: keep only top k tokens with highest probability. top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset whose total probability mass is greater than or equal to the threshold top_p. In practice, we select the highest probability tokens whose cumulative probability mass exceeds the threshold top_p. threshold: a minimal threshold to keep logits # Only work for batch size 1 for now - could update but it would obfuscate a bit the code # Remove all tokens with a probability less than the last token in the top-k tokens # Compute cumulative probabilities of sorted tokens # Remove tokens with cumulative probability above the threshold # Shift the indices to the right to keep also the first token above the threshold # Back to unsorted indices and set them to -infinity # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states # shape (bsz, XX, hidden_size)
1.810028
2
pyxdsm/matrix_eqn.py
marcomangano/pyXDSM
0
6629814
from __future__ import division import os import numpy as np from collections import namedtuple # color pallette link: http://paletton.com/#uid=72Q1j0kllllkS5tKC9H96KClOKC base_file_start = r"""\documentclass[border=0pt]{standalone} % <NAME> 2018 % Based off code by <NAME> (2014), who based his code off <NAME>: http://www.alecjacobson.com/weblog/?p=1289 % nc = necessary comment [do not remove] % Four rules for using these macros: % 1. Always start with a row % 2. Rows contain cols and cols contain rows % 3. Mats should be on at least the 3rd level (row->col->mat, minimum) % 4. If a row contains the mat, add &; if a col contains the mat, add \\ % --------------------------------------- \usepackage{tikz} \usepackage{ifthen} \usepackage{esdiff} \usepackage{varwidth} \definecolor{tableau0}{RGB}{77, 121, 168} \definecolor{tableau1}{RGB}{242, 142, 43} \definecolor{tableau2}{RGB}{255, 87, 88} \definecolor{tableau3}{RGB}{118, 183, 178} \definecolor{tableau4}{RGB}{89, 161, 78} \definecolor{tableau5}{RGB}{237, 201, 72} \definecolor{tableau6}{RGB}{176, 121, 162} \definecolor{tableau7}{RGB}{255, 157, 167} \definecolor{tableau8}{RGB}{156, 116, 94} \definecolor{tableau9}{RGB}{186, 176, 172} \newcommand{\thk}{0.01in} \newcommand{\thkln}{0.02in} % \blockmat{width}{height}{text}{block_options}{other} \newcommand{\blockmat}[5]{ \begin{tikzpicture} \draw[draw=white,fill=white,#4,line width=\thk] (0,0) rectangle( #1-\thk,#2-\thk); #5 \draw (#1/2, #2/2) node {#3}; \end{tikzpicture} } % blockempty{width}{height}{text} \newcommand{\blockempty}[3]{ \blockmat{#1}{#2}{#3}{draw=white,fill=white}{}% } % \blockmat{width}{height}{text}{block_options}{diagonal_width}{diagonal_options} \newcommand{\blockdiag}[6]{ \blockmat{#1}{#2}{#3}{#4} { \draw[#6,line width=\thk] (0,#2-\thk) -- (#5,#2-\thk) -- ( #1-\thk,#5) -- ( #1-\thk,0) -- ( #1-\thk - #5,0) -- (0,#2-\thk -#5) --cycle; }% } % \blockddots{width}{height}{text}{block_options}{dot_radius}{dot_options}{dot_h}{dot_v} \newcommand{\blockdots}[8]{ 
\blockmat{#1}{#2}{#3}{#4}% {% \ifthenelse{\equal{#5}{}} {\newcommand\dotradius{0.01in}} {\newcommand\dotradius{#5}}% \filldraw[#6] (#1/2, #2/2) circle (0.5*\dotradius);% \filldraw[#6] (#1/2 + #7, #2/2 + #8) circle (0.5*\dotradius);% \filldraw[#6] (#1/2 - #7, #2/2 - #8) circle (0.5*\dotradius);% }% } % \leftbracket{width}{height}{options} \newcommand{\leftbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iNE) -- (iNW) -- (iSW) -- (iSE); \draw[draw=white,line width=\thk] (oNE) -- (oNW) -- (oSW) -- (oSE); \end{tikzpicture}%nc } % \rightbracket{width}{height}{options} \newcommand{\rightbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iNW) -- (iNE) -- (iSE) -- (iSW); \draw[draw=white,line width=\thk] (oNW) -- (oNE) -- (oSE) -- (oSW); \end{tikzpicture}%nc } % \upperbracket{width}{height}{options} \newcommand{\upperbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate 
(oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iSW) -- (iNW) -- (iNE) -- (iSE); \draw[draw=white,line width=\thk] (oSW) -- (oNW) -- (oNE) -- (oSE); \end{tikzpicture}%nc } % \lowerbracket{width}{height}{options} \newcommand{\lowerbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iNW) -- (iSW) -- (iSE) -- (iNE); \draw[draw=white,line width=\thk] (oNW) -- (oSW) -- (oSE) -- (oNE); \end{tikzpicture}%nc } % a hack so that I don't have to worry about the number of columns or % spaces between columns in the tabular environment \newenvironment{blockmatrixtabular} {%nc \renewcommand{\arraystretch}{0}%nc \begin{tabular}{ @{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l @{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l @{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l @{} }%nc } { \end{tabular}%nc } % \blockcol{ % } \newcommand{\blockcol}[1]{\vtop{\null\hbox{\begin{blockmatrixtabular}#1\end{blockmatrixtabular}}}&} % \blockrow{ % } \newcommand{\blockrow}[1]{\begin{blockmatrixtabular}#1\end{blockmatrixtabular}\\} \begin{document} \begin{varwidth}{10\textwidth} \newcommand\mwid{0.5in} \newcommand\wid{0.15in} \newcommand\comp{0.3in} \newcommand\ext{0.5in} \newcommand\dt{0.03in} \newcommand\txt{0.8in} \definecolor{Tgrey}{rgb}{0.9,0.9,0.9} \definecolor{Tred}{rgb}{1.0,0.722,0.714} \definecolor{Tgreen}{rgb}{0.639,0.89,0.655} \definecolor{Tblue}{rgb}{0.667,0.631,0.843} \definecolor{Tyellow}{rgb}{1,0.941,0.714} \definecolor{Lred}{rgb}{17.3,0.063,0.059} \definecolor{Lgreen}{rgb}{0.047,0.133,0.051} 
\definecolor{Lblue}{rgb}{0.063,0.051,0.118} \definecolor{Lyellow}{rgb}{0.173,0.149,0.059} \definecolor{Dgrey}{rgb}{0.4,0.4,0.4} \definecolor{Dred}{rgb}{1.0,0.333,0.318} \definecolor{Dgreen}{rgb}{0.282,0.89,0.322} \definecolor{Dblue}{rgb}{0.42,0.341,0.843} \definecolor{Dyellow}{rgb}{1.0,0.863,0.318} \definecolor{Bred}{rgb}{0.302,0.8,0.0} \definecolor{Bgreen}{rgb}{0.4,1.0,0.4} \definecolor{Bblue}{rgb}{0.043,0.012,0.208} \definecolor{Byellow}{rgb}{0.302,0.243,0.0} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% """ base_file_end = r""" %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \end{varwidth} \end{document}""" Variable = namedtuple("Variable", field_names=["size", "idx", "text", "color"]) CellData = namedtuple("CellData", field_names=["text", "color", "highlight"]) def _color(base_color, h_light): if h_light == -1: color = "white" elif h_light == 0: color = "Tgrey" elif h_light == 1: color = "T{}".format(base_color) elif h_light == 2: color = "D{}".format(base_color) elif h_light == 3: color = "B{}".format(base_color) elif h_light == "diag": color = base_color return color def _write_tikz(tikz, out_file, build=True, cleanup=True): with open("{}.tex".format(out_file), "w") as f: f.write(base_file_start) f.write(tikz) f.write(base_file_end) if build: os.system("pdflatex {}.tex".format(out_file)) if cleanup: for ext in ["aux", "fdb_latexmk", "fls", "log", "tex"]: f_name = "{}.{}".format(out_file, ext) if os.path.exists(f_name): os.remove(f_name) class TotalJacobian(object): def __init__(self): self._variables = {} self._j_inputs = {} self._n_inputs = 0 self._i_outputs = {} self._n_outputs = 0 self._connections = {} self._ij_connections = {} self._setup = False def add_input(self, name, size=1, text=""): self._variables[name] = Variable(size=size, idx=self._n_inputs, text=text, color=None) self._j_inputs[self._n_inputs] 
= self._variables[name] self._n_inputs += 1 def add_output(self, name, size=1, text=""): self._variables[name] = Variable(size=size, idx=self._n_outputs, text=text, color=None) self._i_outputs[self._n_outputs] = self._variables[name] self._n_outputs += 1 def connect(self, src, target, text="", color="tableau0"): if isinstance(target, (list, tuple)): for t in target: self._connections[src, t] = CellData(text=text, color=color, highlight="diag") else: self._connections[src, target] = CellData(text=text, color=color, highlight="diag") def _process_vars(self): if self._setup: return # deal with connections for (src, target), cell_data in self._connections.items(): i_src = self._variables[src].idx j_target = self._variables[target].idx self._ij_connections[i_src, j_target] = cell_data self._setup = True def write(self, out_file=None, build=True, cleanup=True): """ Write output files for the matrix equation diagram. This produces the following: - {file_name}.tikz A file containing the TIKZ definition of the tikz diagram. - {file_name}.tex A standalone document wrapped around an include of the TIKZ file which can be compiled to a pdf. - {file_name}.pdf An optional compiled version of the standalone tex file. Parameters ---------- file_name : str The prefix to be used for the output files build : bool Flag that determines whether the standalone PDF of the XDSM will be compiled. Default is True. 
cleanup: bool Flag that determines if padlatex build files will be deleted after build is complete """ self._process_vars() tikz = [] # label the columns tikz.append(r"\blockrow{") # emtpy column for the row labels tikz.append(r" \blockcol{") tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{%s}\\" % (1, 1, "")) tikz.append(r" }") for j in range(self._n_inputs): var = self._j_inputs[j] col_size = var.size tikz.append(r" \blockcol{") tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{%s}\\" % (col_size, 1, var.text)) tikz.append(r" }") tikz.append(r"}") for i in range(self._n_outputs): output = self._i_outputs[i] row_size = output.size tikz.append(r"\blockrow{") # label the row with the output name tikz.append(r" \blockcol{") tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{%s}\\" % (1, row_size, output.text)) tikz.append(r" }") for j in range(self._n_inputs): var = self._j_inputs[j] col_size = var.size tikz.append(r" \blockcol{") if (j, i) in self._ij_connections: cell_data = self._ij_connections[(j, i)] conn_color = "T{}".format(var.color) if cell_data.color is not None: conn_color = _color(cell_data.color, cell_data.highlight) tikz.append( r" \blockmat{%s*\comp}{%s*\comp}{%s}{draw=white,fill=%s}{}\\" % (col_size, row_size, cell_data.text, conn_color) ) else: tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{}\\" % (col_size, row_size)) tikz.append(r" }") tikz.append(r"}") jac_tikz = "\n".join(tikz) _write_tikz(jac_tikz, out_file, build, cleanup) class MatrixEquation(object): def __init__(self): self._variables = {} self._ij_variables = {} self._n_vars = 0 self._connections = {} self._ij_connections = {} self._text = {} self._ij_text = {} self._total_size = 0 self._setup = False self._terms = [] def clear_terms(self): self._terms = [] def add_variable(self, name, size=1, text="", color="blue"): self._variables[name] = Variable(size=size, idx=self._n_vars, text=text, color=color) self._ij_variables[self._n_vars] = self._variables[name] self._n_vars += 1 self._total_size += 
size def connect(self, src, target, text="", color=None, highlight=1): if isinstance(target, (list, tuple)): for t in target: self._connections[src, t] = CellData(text=text, color=color, highlight=highlight) else: self._connections[src, target] = CellData(text=text, color=color, highlight=highlight) def text(self, src, target, text): "don't connect the src and target, but put some text where a connection would be" self._text[src, target] = CellData(text=text, color=None, highlight=-1) def _process_vars(self): """map all the data onto i,j grid""" if self._setup: return # deal with connections for (src, target), cell_data in self._connections.items(): i_src = self._variables[src].idx i_target = self._variables[target].idx self._ij_connections[i_src, i_target] = cell_data for (src, target), cell_data in self._text.items(): i_src = self._variables[src].idx j_target = self._variables[target].idx self._ij_text[i_src, j_target] = cell_data self._setup = True def jacobian(self, transpose=False): self._process_vars() tikz = [] for i in range(self._n_vars): tikz.append(r"\blockrow{") row_size = self._ij_variables[i].size for j in range(self._n_vars): var = self._ij_variables[j] col_size = var.size tikz.append(r" \blockcol{") if transpose: location = (i, j) else: location = (j, i) if i == j: tikz.append( r" \blockmat{%s*\comp}{%s*\comp}{%s}{draw=white,fill=D%s}{}\\" % (col_size, row_size, var.text, var.color) ) elif location in self._ij_connections: cell_data = self._ij_connections[location] conn_color = "T{}".format(var.color) if cell_data.color is not None: conn_color = _color(cell_data.color, cell_data.highlight) tikz.append( r" \blockmat{%s*\comp}{%s*\comp}{%s}{draw=white,fill=%s}{}\\" % (col_size, row_size, cell_data.text, conn_color) ) elif location in self._ij_text: cell_data = self._ij_text[location] tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{%s}\\" % (col_size, row_size, cell_data.text)) else: tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{}\\" % (col_size, 
row_size)) tikz.append(r" }") tikz.append(r"}") lhs_tikz = "\n".join(tikz) self._terms.append(lhs_tikz) return lhs_tikz def vector(self, base_color="red", highlight=None): self._process_vars() tikz = [] if highlight is None: highlight = np.ones(self._n_vars) for i, h_light in enumerate(highlight): color = _color(base_color, h_light) row_size = self._ij_variables[i].size tikz.append(r"\blockrow{\blockcol{") if h_light == "diag": tikz.append( r" \blockdiag{1*\comp}{%s*\comp}{}{draw=white,fill=T%s}{\dt}{draw=white,fill=D%s}\\" % (row_size, color, color) ) else: tikz.append(r" \blockmat{1*\comp}{%s*\comp}{}{draw=white,fill=%s}{}\\" % (row_size, color)) tikz.append(r"}}") vec_tikz = "\n".join(tikz) self._terms.append(vec_tikz) return vec_tikz def operator(self, opperator="="): self._process_vars() tikz = [] padding_size = (self._total_size - 1) / 2 tikz.append(r"\blockrow{") tikz.append(r" \blockempty{\mwid}{%s*\comp}{} \\" % (padding_size)) tikz.append(r" \blockmat{\mwid}{1*\comp}{\huge $%s$}{draw=white,fill=white}{}\\" % (opperator)) tikz.append(r" \blockempty{\mwid}{%s*\comp}{} \\" % (padding_size)) tikz.append(r"}") op_tikz = "\n".join(tikz) self._terms.append(op_tikz) return op_tikz def spacer(self): self._process_vars() tikz = [] for i in range(self._n_vars): row_size = self._ij_variables[i].size tikz.append(r"\blockrow{\blockcol{") tikz.append(r" \blockmat{.25*\mwid}{%s*\comp}{}{draw=white,fill=white}{}\\" % (row_size)) tikz.append(r"}}") spacer_tikz = "\n".join(tikz) self._terms.append(spacer_tikz) return spacer_tikz def write(self, out_file=None, build=True, cleanup=True): """ Write output files for the matrix equation diagram. This produces the following: - {file_name}.tikz A file containing the TIKZ definition of the tikz diagram. - {file_name}.tex A standalone document wrapped around an include of the TIKZ file which can be compiled to a pdf. - {file_name}.pdf An optional compiled version of the standalone tex file. 
Parameters ---------- file_name : str The prefix to be used for the output files build : bool Flag that determines whether the standalone PDF of the XDSM will be compiled. Default is True. cleanup: bool Flag that determines if padlatex build files will be deleted after build is complete """ tikz = [] tikz.append(r"\blockrow{") for term in self._terms: tikz.append(r"\blockcol{") tikz.append(term) tikz.append(r"}") tikz.append(r"}") eqn_tikz = "\n".join(tikz) if out_file: # with open('{}.tex'.format(out_file), 'w') as f: # f.write(base_file_start) # f.write(eqn_tikz) # f.write(base_file_end) # if build: # os.system('pdflatex {}.tex'.format(out_file)) # if cleanup: # for ext in ['aux', 'fdb_latexmk', 'fls', 'log', 'tex']: # f_name = '{}.{}'.format(out_file, ext) # if os.path.exists(f_name): # os.remove(f_name) _write_tikz(eqn_tikz, out_file, build, cleanup) if __name__ == "__main__": lst = MatrixEquation() lst.add_variable("x", text=r"$x$") lst.add_variable("y", size=3, text=r"$y$") lst.add_variable("z") lst.connect("x", "y") lst.connect("y", "z") lst.text("z", "x", r"$0$") lst.jacobian(transpose=True) lst.spacer() lst.vector(base_color="green", highlight=[3, 2, "diag"]) lst.operator("=") lst.vector(base_color="red") lst.vector(base_color="red") lst.write("test") J = TotalJacobian() J.add_input("a", text=r"$a$") J.add_input("b", text=r"$b$") J.add_input("c", text=r"$c$") J.add_input("d", text=r"$d$") J.add_input("e", text=r"$e$") J.add_output("gc", text=r"$g_c$") J.add_output("gd", text=r"$g_d$") J.add_output("ge", text=r"$g_e$") J.add_output("f", text=r"$f$") J.connect("a", ("gc", "gd", "ge", "f")) J.connect("b", ("gc", "gd", "ge", "f")) J.connect("c", "gc") J.connect("d", "gd") J.connect("e", ("ge", "f")) J.write("J_test", cleanup=False)
from __future__ import division import os import numpy as np from collections import namedtuple # color pallette link: http://paletton.com/#uid=72Q1j0kllllkS5tKC9H96KClOKC base_file_start = r"""\documentclass[border=0pt]{standalone} % <NAME> 2018 % Based off code by <NAME> (2014), who based his code off <NAME>: http://www.alecjacobson.com/weblog/?p=1289 % nc = necessary comment [do not remove] % Four rules for using these macros: % 1. Always start with a row % 2. Rows contain cols and cols contain rows % 3. Mats should be on at least the 3rd level (row->col->mat, minimum) % 4. If a row contains the mat, add &; if a col contains the mat, add \\ % --------------------------------------- \usepackage{tikz} \usepackage{ifthen} \usepackage{esdiff} \usepackage{varwidth} \definecolor{tableau0}{RGB}{77, 121, 168} \definecolor{tableau1}{RGB}{242, 142, 43} \definecolor{tableau2}{RGB}{255, 87, 88} \definecolor{tableau3}{RGB}{118, 183, 178} \definecolor{tableau4}{RGB}{89, 161, 78} \definecolor{tableau5}{RGB}{237, 201, 72} \definecolor{tableau6}{RGB}{176, 121, 162} \definecolor{tableau7}{RGB}{255, 157, 167} \definecolor{tableau8}{RGB}{156, 116, 94} \definecolor{tableau9}{RGB}{186, 176, 172} \newcommand{\thk}{0.01in} \newcommand{\thkln}{0.02in} % \blockmat{width}{height}{text}{block_options}{other} \newcommand{\blockmat}[5]{ \begin{tikzpicture} \draw[draw=white,fill=white,#4,line width=\thk] (0,0) rectangle( #1-\thk,#2-\thk); #5 \draw (#1/2, #2/2) node {#3}; \end{tikzpicture} } % blockempty{width}{height}{text} \newcommand{\blockempty}[3]{ \blockmat{#1}{#2}{#3}{draw=white,fill=white}{}% } % \blockmat{width}{height}{text}{block_options}{diagonal_width}{diagonal_options} \newcommand{\blockdiag}[6]{ \blockmat{#1}{#2}{#3}{#4} { \draw[#6,line width=\thk] (0,#2-\thk) -- (#5,#2-\thk) -- ( #1-\thk,#5) -- ( #1-\thk,0) -- ( #1-\thk - #5,0) -- (0,#2-\thk -#5) --cycle; }% } % \blockddots{width}{height}{text}{block_options}{dot_radius}{dot_options}{dot_h}{dot_v} \newcommand{\blockdots}[8]{ 
\blockmat{#1}{#2}{#3}{#4}% {% \ifthenelse{\equal{#5}{}} {\newcommand\dotradius{0.01in}} {\newcommand\dotradius{#5}}% \filldraw[#6] (#1/2, #2/2) circle (0.5*\dotradius);% \filldraw[#6] (#1/2 + #7, #2/2 + #8) circle (0.5*\dotradius);% \filldraw[#6] (#1/2 - #7, #2/2 - #8) circle (0.5*\dotradius);% }% } % \leftbracket{width}{height}{options} \newcommand{\leftbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iNE) -- (iNW) -- (iSW) -- (iSE); \draw[draw=white,line width=\thk] (oNE) -- (oNW) -- (oSW) -- (oSE); \end{tikzpicture}%nc } % \rightbracket{width}{height}{options} \newcommand{\rightbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iNW) -- (iNE) -- (iSE) -- (iSW); \draw[draw=white,line width=\thk] (oNW) -- (oNE) -- (oSE) -- (oSW); \end{tikzpicture}%nc } % \upperbracket{width}{height}{options} \newcommand{\upperbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate 
(oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iSW) -- (iNW) -- (iNE) -- (iSE); \draw[draw=white,line width=\thk] (oSW) -- (oNW) -- (oNE) -- (oSE); \end{tikzpicture}%nc } % \lowerbracket{width}{height}{options} \newcommand{\lowerbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iNW) -- (iSW) -- (iSE) -- (iNE); \draw[draw=white,line width=\thk] (oNW) -- (oSW) -- (oSE) -- (oNE); \end{tikzpicture}%nc } % a hack so that I don't have to worry about the number of columns or % spaces between columns in the tabular environment \newenvironment{blockmatrixtabular} {%nc \renewcommand{\arraystretch}{0}%nc \begin{tabular}{ @{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l @{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l @{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l @{} }%nc } { \end{tabular}%nc } % \blockcol{ % } \newcommand{\blockcol}[1]{\vtop{\null\hbox{\begin{blockmatrixtabular}#1\end{blockmatrixtabular}}}&} % \blockrow{ % } \newcommand{\blockrow}[1]{\begin{blockmatrixtabular}#1\end{blockmatrixtabular}\\} \begin{document} \begin{varwidth}{10\textwidth} \newcommand\mwid{0.5in} \newcommand\wid{0.15in} \newcommand\comp{0.3in} \newcommand\ext{0.5in} \newcommand\dt{0.03in} \newcommand\txt{0.8in} \definecolor{Tgrey}{rgb}{0.9,0.9,0.9} \definecolor{Tred}{rgb}{1.0,0.722,0.714} \definecolor{Tgreen}{rgb}{0.639,0.89,0.655} \definecolor{Tblue}{rgb}{0.667,0.631,0.843} \definecolor{Tyellow}{rgb}{1,0.941,0.714} \definecolor{Lred}{rgb}{17.3,0.063,0.059} \definecolor{Lgreen}{rgb}{0.047,0.133,0.051} 
\definecolor{Lblue}{rgb}{0.063,0.051,0.118} \definecolor{Lyellow}{rgb}{0.173,0.149,0.059} \definecolor{Dgrey}{rgb}{0.4,0.4,0.4} \definecolor{Dred}{rgb}{1.0,0.333,0.318} \definecolor{Dgreen}{rgb}{0.282,0.89,0.322} \definecolor{Dblue}{rgb}{0.42,0.341,0.843} \definecolor{Dyellow}{rgb}{1.0,0.863,0.318} \definecolor{Bred}{rgb}{0.302,0.8,0.0} \definecolor{Bgreen}{rgb}{0.4,1.0,0.4} \definecolor{Bblue}{rgb}{0.043,0.012,0.208} \definecolor{Byellow}{rgb}{0.302,0.243,0.0} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% """ base_file_end = r""" %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \end{varwidth} \end{document}""" Variable = namedtuple("Variable", field_names=["size", "idx", "text", "color"]) CellData = namedtuple("CellData", field_names=["text", "color", "highlight"]) def _color(base_color, h_light): if h_light == -1: color = "white" elif h_light == 0: color = "Tgrey" elif h_light == 1: color = "T{}".format(base_color) elif h_light == 2: color = "D{}".format(base_color) elif h_light == 3: color = "B{}".format(base_color) elif h_light == "diag": color = base_color return color def _write_tikz(tikz, out_file, build=True, cleanup=True): with open("{}.tex".format(out_file), "w") as f: f.write(base_file_start) f.write(tikz) f.write(base_file_end) if build: os.system("pdflatex {}.tex".format(out_file)) if cleanup: for ext in ["aux", "fdb_latexmk", "fls", "log", "tex"]: f_name = "{}.{}".format(out_file, ext) if os.path.exists(f_name): os.remove(f_name) class TotalJacobian(object): def __init__(self): self._variables = {} self._j_inputs = {} self._n_inputs = 0 self._i_outputs = {} self._n_outputs = 0 self._connections = {} self._ij_connections = {} self._setup = False def add_input(self, name, size=1, text=""): self._variables[name] = Variable(size=size, idx=self._n_inputs, text=text, color=None) self._j_inputs[self._n_inputs] 
= self._variables[name] self._n_inputs += 1 def add_output(self, name, size=1, text=""): self._variables[name] = Variable(size=size, idx=self._n_outputs, text=text, color=None) self._i_outputs[self._n_outputs] = self._variables[name] self._n_outputs += 1 def connect(self, src, target, text="", color="tableau0"): if isinstance(target, (list, tuple)): for t in target: self._connections[src, t] = CellData(text=text, color=color, highlight="diag") else: self._connections[src, target] = CellData(text=text, color=color, highlight="diag") def _process_vars(self): if self._setup: return # deal with connections for (src, target), cell_data in self._connections.items(): i_src = self._variables[src].idx j_target = self._variables[target].idx self._ij_connections[i_src, j_target] = cell_data self._setup = True def write(self, out_file=None, build=True, cleanup=True): """ Write output files for the matrix equation diagram. This produces the following: - {file_name}.tikz A file containing the TIKZ definition of the tikz diagram. - {file_name}.tex A standalone document wrapped around an include of the TIKZ file which can be compiled to a pdf. - {file_name}.pdf An optional compiled version of the standalone tex file. Parameters ---------- file_name : str The prefix to be used for the output files build : bool Flag that determines whether the standalone PDF of the XDSM will be compiled. Default is True. 
cleanup: bool Flag that determines if padlatex build files will be deleted after build is complete """ self._process_vars() tikz = [] # label the columns tikz.append(r"\blockrow{") # emtpy column for the row labels tikz.append(r" \blockcol{") tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{%s}\\" % (1, 1, "")) tikz.append(r" }") for j in range(self._n_inputs): var = self._j_inputs[j] col_size = var.size tikz.append(r" \blockcol{") tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{%s}\\" % (col_size, 1, var.text)) tikz.append(r" }") tikz.append(r"}") for i in range(self._n_outputs): output = self._i_outputs[i] row_size = output.size tikz.append(r"\blockrow{") # label the row with the output name tikz.append(r" \blockcol{") tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{%s}\\" % (1, row_size, output.text)) tikz.append(r" }") for j in range(self._n_inputs): var = self._j_inputs[j] col_size = var.size tikz.append(r" \blockcol{") if (j, i) in self._ij_connections: cell_data = self._ij_connections[(j, i)] conn_color = "T{}".format(var.color) if cell_data.color is not None: conn_color = _color(cell_data.color, cell_data.highlight) tikz.append( r" \blockmat{%s*\comp}{%s*\comp}{%s}{draw=white,fill=%s}{}\\" % (col_size, row_size, cell_data.text, conn_color) ) else: tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{}\\" % (col_size, row_size)) tikz.append(r" }") tikz.append(r"}") jac_tikz = "\n".join(tikz) _write_tikz(jac_tikz, out_file, build, cleanup) class MatrixEquation(object): def __init__(self): self._variables = {} self._ij_variables = {} self._n_vars = 0 self._connections = {} self._ij_connections = {} self._text = {} self._ij_text = {} self._total_size = 0 self._setup = False self._terms = [] def clear_terms(self): self._terms = [] def add_variable(self, name, size=1, text="", color="blue"): self._variables[name] = Variable(size=size, idx=self._n_vars, text=text, color=color) self._ij_variables[self._n_vars] = self._variables[name] self._n_vars += 1 self._total_size += 
size def connect(self, src, target, text="", color=None, highlight=1): if isinstance(target, (list, tuple)): for t in target: self._connections[src, t] = CellData(text=text, color=color, highlight=highlight) else: self._connections[src, target] = CellData(text=text, color=color, highlight=highlight) def text(self, src, target, text): "don't connect the src and target, but put some text where a connection would be" self._text[src, target] = CellData(text=text, color=None, highlight=-1) def _process_vars(self): """map all the data onto i,j grid""" if self._setup: return # deal with connections for (src, target), cell_data in self._connections.items(): i_src = self._variables[src].idx i_target = self._variables[target].idx self._ij_connections[i_src, i_target] = cell_data for (src, target), cell_data in self._text.items(): i_src = self._variables[src].idx j_target = self._variables[target].idx self._ij_text[i_src, j_target] = cell_data self._setup = True def jacobian(self, transpose=False): self._process_vars() tikz = [] for i in range(self._n_vars): tikz.append(r"\blockrow{") row_size = self._ij_variables[i].size for j in range(self._n_vars): var = self._ij_variables[j] col_size = var.size tikz.append(r" \blockcol{") if transpose: location = (i, j) else: location = (j, i) if i == j: tikz.append( r" \blockmat{%s*\comp}{%s*\comp}{%s}{draw=white,fill=D%s}{}\\" % (col_size, row_size, var.text, var.color) ) elif location in self._ij_connections: cell_data = self._ij_connections[location] conn_color = "T{}".format(var.color) if cell_data.color is not None: conn_color = _color(cell_data.color, cell_data.highlight) tikz.append( r" \blockmat{%s*\comp}{%s*\comp}{%s}{draw=white,fill=%s}{}\\" % (col_size, row_size, cell_data.text, conn_color) ) elif location in self._ij_text: cell_data = self._ij_text[location] tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{%s}\\" % (col_size, row_size, cell_data.text)) else: tikz.append(r" \blockempty{%s*\comp}{%s*\comp}{}\\" % (col_size, 
row_size)) tikz.append(r" }") tikz.append(r"}") lhs_tikz = "\n".join(tikz) self._terms.append(lhs_tikz) return lhs_tikz def vector(self, base_color="red", highlight=None): self._process_vars() tikz = [] if highlight is None: highlight = np.ones(self._n_vars) for i, h_light in enumerate(highlight): color = _color(base_color, h_light) row_size = self._ij_variables[i].size tikz.append(r"\blockrow{\blockcol{") if h_light == "diag": tikz.append( r" \blockdiag{1*\comp}{%s*\comp}{}{draw=white,fill=T%s}{\dt}{draw=white,fill=D%s}\\" % (row_size, color, color) ) else: tikz.append(r" \blockmat{1*\comp}{%s*\comp}{}{draw=white,fill=%s}{}\\" % (row_size, color)) tikz.append(r"}}") vec_tikz = "\n".join(tikz) self._terms.append(vec_tikz) return vec_tikz def operator(self, opperator="="): self._process_vars() tikz = [] padding_size = (self._total_size - 1) / 2 tikz.append(r"\blockrow{") tikz.append(r" \blockempty{\mwid}{%s*\comp}{} \\" % (padding_size)) tikz.append(r" \blockmat{\mwid}{1*\comp}{\huge $%s$}{draw=white,fill=white}{}\\" % (opperator)) tikz.append(r" \blockempty{\mwid}{%s*\comp}{} \\" % (padding_size)) tikz.append(r"}") op_tikz = "\n".join(tikz) self._terms.append(op_tikz) return op_tikz def spacer(self): self._process_vars() tikz = [] for i in range(self._n_vars): row_size = self._ij_variables[i].size tikz.append(r"\blockrow{\blockcol{") tikz.append(r" \blockmat{.25*\mwid}{%s*\comp}{}{draw=white,fill=white}{}\\" % (row_size)) tikz.append(r"}}") spacer_tikz = "\n".join(tikz) self._terms.append(spacer_tikz) return spacer_tikz def write(self, out_file=None, build=True, cleanup=True): """ Write output files for the matrix equation diagram. This produces the following: - {file_name}.tikz A file containing the TIKZ definition of the tikz diagram. - {file_name}.tex A standalone document wrapped around an include of the TIKZ file which can be compiled to a pdf. - {file_name}.pdf An optional compiled version of the standalone tex file. 
Parameters ---------- file_name : str The prefix to be used for the output files build : bool Flag that determines whether the standalone PDF of the XDSM will be compiled. Default is True. cleanup: bool Flag that determines if padlatex build files will be deleted after build is complete """ tikz = [] tikz.append(r"\blockrow{") for term in self._terms: tikz.append(r"\blockcol{") tikz.append(term) tikz.append(r"}") tikz.append(r"}") eqn_tikz = "\n".join(tikz) if out_file: # with open('{}.tex'.format(out_file), 'w') as f: # f.write(base_file_start) # f.write(eqn_tikz) # f.write(base_file_end) # if build: # os.system('pdflatex {}.tex'.format(out_file)) # if cleanup: # for ext in ['aux', 'fdb_latexmk', 'fls', 'log', 'tex']: # f_name = '{}.{}'.format(out_file, ext) # if os.path.exists(f_name): # os.remove(f_name) _write_tikz(eqn_tikz, out_file, build, cleanup) if __name__ == "__main__": lst = MatrixEquation() lst.add_variable("x", text=r"$x$") lst.add_variable("y", size=3, text=r"$y$") lst.add_variable("z") lst.connect("x", "y") lst.connect("y", "z") lst.text("z", "x", r"$0$") lst.jacobian(transpose=True) lst.spacer() lst.vector(base_color="green", highlight=[3, 2, "diag"]) lst.operator("=") lst.vector(base_color="red") lst.vector(base_color="red") lst.write("test") J = TotalJacobian() J.add_input("a", text=r"$a$") J.add_input("b", text=r"$b$") J.add_input("c", text=r"$c$") J.add_input("d", text=r"$d$") J.add_input("e", text=r"$e$") J.add_output("gc", text=r"$g_c$") J.add_output("gd", text=r"$g_d$") J.add_output("ge", text=r"$g_e$") J.add_output("f", text=r"$f$") J.connect("a", ("gc", "gd", "ge", "f")) J.connect("b", ("gc", "gd", "ge", "f")) J.connect("c", "gc") J.connect("d", "gd") J.connect("e", ("ge", "f")) J.write("J_test", cleanup=False)
en
0.571437
# color pallette link: http://paletton.com/#uid=72Q1j0kllllkS5tKC9H96KClOKC \documentclass[border=0pt]{standalone} % <NAME> 2018 % Based off code by <NAME> (2014), who based his code off <NAME>: http://www.alecjacobson.com/weblog/?p=1289 % nc = necessary comment [do not remove] % Four rules for using these macros: % 1. Always start with a row % 2. Rows contain cols and cols contain rows % 3. Mats should be on at least the 3rd level (row->col->mat, minimum) % 4. If a row contains the mat, add &; if a col contains the mat, add \\ % --------------------------------------- \usepackage{tikz} \usepackage{ifthen} \usepackage{esdiff} \usepackage{varwidth} \definecolor{tableau0}{RGB}{77, 121, 168} \definecolor{tableau1}{RGB}{242, 142, 43} \definecolor{tableau2}{RGB}{255, 87, 88} \definecolor{tableau3}{RGB}{118, 183, 178} \definecolor{tableau4}{RGB}{89, 161, 78} \definecolor{tableau5}{RGB}{237, 201, 72} \definecolor{tableau6}{RGB}{176, 121, 162} \definecolor{tableau7}{RGB}{255, 157, 167} \definecolor{tableau8}{RGB}{156, 116, 94} \definecolor{tableau9}{RGB}{186, 176, 172} \newcommand{\thk}{0.01in} \newcommand{\thkln}{0.02in} % \blockmat{width}{height}{text}{block_options}{other} \newcommand{\blockmat}[5]{ \begin{tikzpicture} \draw[draw=white,fill=white,#4,line width=\thk] (0,0) rectangle( #1-\thk,#2-\thk); #5 \draw (#1/2, #2/2) node {#3}; \end{tikzpicture} } % blockempty{width}{height}{text} \newcommand{\blockempty}[3]{ \blockmat{#1}{#2}{#3}{draw=white,fill=white}{}% } % \blockmat{width}{height}{text}{block_options}{diagonal_width}{diagonal_options} \newcommand{\blockdiag}[6]{ \blockmat{#1}{#2}{#3}{#4} { \draw[#6,line width=\thk] (0,#2-\thk) -- (#5,#2-\thk) -- ( #1-\thk,#5) -- ( #1-\thk,0) -- ( #1-\thk - #5,0) -- (0,#2-\thk -#5) --cycle; }% } % \blockddots{width}{height}{text}{block_options}{dot_radius}{dot_options}{dot_h}{dot_v} \newcommand{\blockdots}[8]{ \blockmat{#1}{#2}{#3}{#4}% {% \ifthenelse{\equal{#5}{}} {\newcommand\dotradius{0.01in}} {\newcommand\dotradius{#5}}% 
\filldraw[#6] (#1/2, #2/2) circle (0.5*\dotradius);% \filldraw[#6] (#1/2 + #7, #2/2 + #8) circle (0.5*\dotradius);% \filldraw[#6] (#1/2 - #7, #2/2 - #8) circle (0.5*\dotradius);% }% } % \leftbracket{width}{height}{options} \newcommand{\leftbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iNE) -- (iNW) -- (iSW) -- (iSE); \draw[draw=white,line width=\thk] (oNE) -- (oNW) -- (oSW) -- (oSE); \end{tikzpicture}%nc } % \rightbracket{width}{height}{options} \newcommand{\rightbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iNW) -- (iNE) -- (iSE) -- (iSW); \draw[draw=white,line width=\thk] (oNW) -- (oNE) -- (oSE) -- (oSW); \end{tikzpicture}%nc } % \upperbracket{width}{height}{options} \newcommand{\upperbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iSW) -- (iNW) -- (iNE) -- (iSE); \draw[draw=white,line 
width=\thk] (oSW) -- (oNW) -- (oNE) -- (oSE); \end{tikzpicture}%nc } % \lowerbracket{width}{height}{options} \newcommand{\lowerbracket}[3]{ \begin{tikzpicture} \coordinate (iSW) at (\thk+\thkln/2,\thk+\thkln/2); \coordinate (iNW) at (\thk+\thkln/2,#2-\thk-\thkln/2); \coordinate (iSE) at (#1-\thk-\thkln/2,\thk+\thkln/2); \coordinate (iNE) at (#1-\thk-\thkln/2,#2-\thk-\thkln/2); \coordinate (oSW) at (\thk/2,\thk/2); \coordinate (oNW) at (\thk/2,#2-\thk/2); \coordinate (oSE) at (#1-\thk/2,\thk/2); \coordinate (oNE) at (#1-\thk/2,#2-\thk/2); \draw[#3,line width=\thkln] (iNW) -- (iSW) -- (iSE) -- (iNE); \draw[draw=white,line width=\thk] (oNW) -- (oSW) -- (oSE) -- (oNE); \end{tikzpicture}%nc } % a hack so that I don't have to worry about the number of columns or % spaces between columns in the tabular environment \newenvironment{blockmatrixtabular} {%nc \renewcommand{\arraystretch}{0}%nc \begin{tabular}{ @{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l @{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l @{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l@{}l @{} }%nc } { \end{tabular}%nc } % \blockcol{ % } \newcommand{\blockcol}[1]{\vtop{\null\hbox{\begin{blockmatrixtabular}#1\end{blockmatrixtabular}}}&} % \blockrow{ % } \newcommand{\blockrow}[1]{\begin{blockmatrixtabular}#1\end{blockmatrixtabular}\\} \begin{document} \begin{varwidth}{10\textwidth} \newcommand\mwid{0.5in} \newcommand\wid{0.15in} \newcommand\comp{0.3in} \newcommand\ext{0.5in} \newcommand\dt{0.03in} \newcommand\txt{0.8in} \definecolor{Tgrey}{rgb}{0.9,0.9,0.9} \definecolor{Tred}{rgb}{1.0,0.722,0.714} \definecolor{Tgreen}{rgb}{0.639,0.89,0.655} \definecolor{Tblue}{rgb}{0.667,0.631,0.843} \definecolor{Tyellow}{rgb}{1,0.941,0.714} \definecolor{Lred}{rgb}{17.3,0.063,0.059} \definecolor{Lgreen}{rgb}{0.047,0.133,0.051} \definecolor{Lblue}{rgb}{0.063,0.051,0.118} \definecolor{Lyellow}{rgb}{0.173,0.149,0.059} 
\definecolor{Dgrey}{rgb}{0.4,0.4,0.4} \definecolor{Dred}{rgb}{1.0,0.333,0.318} \definecolor{Dgreen}{rgb}{0.282,0.89,0.322} \definecolor{Dblue}{rgb}{0.42,0.341,0.843} \definecolor{Dyellow}{rgb}{1.0,0.863,0.318} \definecolor{Bred}{rgb}{0.302,0.8,0.0} \definecolor{Bgreen}{rgb}{0.4,1.0,0.4} \definecolor{Bblue}{rgb}{0.043,0.012,0.208} \definecolor{Byellow}{rgb}{0.302,0.243,0.0} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \end{varwidth} \end{document} # deal with connections Write output files for the matrix equation diagram. This produces the following: - {file_name}.tikz A file containing the TIKZ definition of the tikz diagram. - {file_name}.tex A standalone document wrapped around an include of the TIKZ file which can be compiled to a pdf. - {file_name}.pdf An optional compiled version of the standalone tex file. Parameters ---------- file_name : str The prefix to be used for the output files build : bool Flag that determines whether the standalone PDF of the XDSM will be compiled. Default is True. cleanup: bool Flag that determines if padlatex build files will be deleted after build is complete # label the columns # emtpy column for the row labels # label the row with the output name map all the data onto i,j grid # deal with connections Write output files for the matrix equation diagram. This produces the following: - {file_name}.tikz A file containing the TIKZ definition of the tikz diagram. - {file_name}.tex A standalone document wrapped around an include of the TIKZ file which can be compiled to a pdf. - {file_name}.pdf An optional compiled version of the standalone tex file. Parameters ---------- file_name : str The prefix to be used for the output files build : bool Flag that determines whether the standalone PDF of the XDSM will be compiled. Default is True. 
cleanup: bool Flag that determines if padlatex build files will be deleted after build is complete # with open('{}.tex'.format(out_file), 'w') as f: # f.write(base_file_start) # f.write(eqn_tikz) # f.write(base_file_end) # if build: # os.system('pdflatex {}.tex'.format(out_file)) # if cleanup: # for ext in ['aux', 'fdb_latexmk', 'fls', 'log', 'tex']: # f_name = '{}.{}'.format(out_file, ext) # if os.path.exists(f_name): # os.remove(f_name)
2.709649
3
python/src/main/python/pyalink/alink/common/types/model_info.py
wenwei8268/Alink
0
6629815
from .bases.j_obj_wrapper import JavaObjectWrapperWithAutoTypeConversion from ..utils.packages import in_ipython class ClusteringModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.clustering.ClusteringModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getClusterCenter(self, clusterId): return self.getClusterCenter(clusterId) def getClusterNumber(self): return self.getClusterNumber() class KMeansModelInfo(ClusteringModelInfo): _j_cls_name = 'com.alibaba.alink.operator.batch.clustering.KMeansModelInfoBatchOp$KMeansModelInfo' def __init__(self, j_obj): super(KMeansModelInfo, self).__init__(j_obj) def getClusterWeight(self, clusterId): return self.getClusterWeight(clusterId) class BisectingKMeansModelInfo(ClusteringModelInfo): _j_cls_name = 'com.alibaba.alink.operator.batch.clustering.BisectingKMeansModelInfoBatchOp$BisectingKMeansModelInfo' def __init__(self, j_obj): super(BisectingKMeansModelInfo, self).__init__(j_obj) class GmmModelInfo(ClusteringModelInfo): _j_cls_name = 'com.alibaba.alink.operator.batch.clustering.GmmModelInfoBatchOp$GmmModelInfo' def __init__(self, j_obj): super(GmmModelInfo, self).__init__(j_obj) def getClusterCovarianceMatrix(self, clusterId): return self.getClusterCovarianceMatrix(clusterId) class OneHotModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.feature.OneHotModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getSelectedColsInModel(self): return self.getSelectedColsInModel() def getDistinctTokenNumber(self, columnName): return self.getDistinctTokenNumber(columnName) def getTokens(self, columnName): return self.getTokens(columnName) class QuantileDiscretizerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.feature.QuantileDiscretizerModelInfo' def __init__(self, j_obj): self._j_obj = 
j_obj def get_j_obj(self): return self._j_obj def getSelectedColsInModel(self): return self.getSelectedColsInModel() def getCutsArray(self, columnName): return self.getCutsArray(columnName) _unsupported_j_methods = ['mapToString'] class NaiveBayesTextModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.batch.classification.NaiveBayesTextModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getVectorColName(self): return self.getVectorColName() def getModelType(self): return self.getModelType() def getLabelList(self): return self.getLabelList() def getPriorProbability(self): return self.getPriorProbability() def getFeatureProbability(self): return self.getFeatureProbability() _unsupported_j_methods = ['generateLabelProportionTable'] class LdaModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.batch.clustering.LdaModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getLogPerplexity(self): return self.getLogPerplexity() def getLogLikelihood(self): return self.getLogLikelihood() def getTopicNum(self): return self.getTopicNum() def getVocabularySize(self): return self.getVocabularySize() class MaxAbsScalarModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.MaxAbsScalarModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getMaxAbs(self): return self.getMaxAbs() class MinMaxScalerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.MinMaxScalerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getMaxs(self): return self.getMaxs() def getMins(self): return self.getMins() class StandardScalerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 
'com.alibaba.alink.operator.common.dataproc.StandardScalerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getStdDevs(self): return self.getStdDevs() def getMeans(self): return self.getMeans() def isWithMeans(self): return self.isWithMeans() def isWithStdDevs(self): return self.isWithStdDevs() class VectorMaxAbsScalarModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.vector.VectorMaxAbsScalarModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getMaxAbs(self): return self.getMaxAbs() class VectorMinMaxScalerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.vector.VectorMinMaxScalerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getMaxs(self): return self.getMaxs() def getMins(self): return self.getMins() class VectorStandardScalerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.vector.VectorStandardScalerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getStdDevs(self): return self.getStdDevs() def getMeans(self): return self.getMeans() def isWithMeans(self): return self.isWithMeans() def isWithStdDevs(self): return self.isWithStdDevs() class EqualWidthDiscretizerModelInfo(QuantileDiscretizerModelInfo): _j_cls_name = 'com.alibaba.alink.operator.batch.feature.EqualWidthDiscretizerModelInfoBatchOp$EqualWidthDiscretizerModelInfo' def __init__(self, j_obj): super(EqualWidthDiscretizerModelInfo, self).__init__(j_obj) def get_j_obj(self): return self._j_obj _unsupported_j_methods = ['mapToString'] class ChisqSelectorModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.feature.ChisqSelectorModelInfo' def __init__(self, j_obj): self._j_obj = 
j_obj def get_j_obj(self): return self._j_obj def chisq(self, arg0): return self.chisq(arg0) def pValue(self, arg0): return self.pValue(arg0) def getSelectorType(self): return self.getSelectorType() def getNumTopFeatures(self): return self.getNumTopFeatures() def getPercentile(self): return self.getPercentile() def getFpr(self): return self.getFpr() def getFdr(self): return self.getFdr() def getFwe(self): return self.getFwe() def getSelectorNum(self): return self.getSelectorNum() def getColNames(self): return self.getColNames() def getSiftOutColNames(self): return self.getSiftOutColNames() class PcaModelData(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.feature.pca.PcaModelData' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getCols(self): return self.getCols() def getEigenValues(self): return self.getEigenValues() def getEigenVectors(self): return self.getEigenVectors() def getProportions(self): return self.getProportions() def getCumulatives(self): return self.getCumulatives() class FmRegressorModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.fm.FmRegressorModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def hasIntercept(self): return self.hasIntercept() def hasLinearItem(self): return self.hasLinearItem() def getNumFactor(self): return self.getNumFactor() def getTask(self): return self.getTask() def getNumFeature(self): return self.getNumFeature() def getFactors(self): return self.getFactors() def getFeatureColNames(self): return self.getFeatureColNames() class FmClassifierModelInfo(FmRegressorModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.fm.FmClassifierModelInfo' def __init__(self, j_obj): super(FmClassifierModelInfo, self).__init__(j_obj) def getLabelValues(self): return self.getLabelValues() class 
LinearRegressorModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.linear.LinearRegressorModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def hasInterceptItem(self): return self.hasInterceptItem() def getFeatureNames(self): return self.getFeatureNames() def getVectorColName(self): return self.getVectorColName() def getWeight(self): return self.getWeight() def getVectorSize(self): return self.getVectorSize() def getModelName(self): return self.getModelName() class LinearClassifierModelInfo(LinearRegressorModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.linear.LinearClassifierModelInfo' def __init__(self, j_obj): super(LinearClassifierModelInfo, self).__init__(j_obj) def getLabelValues(self): return self.getLabelValues() class SoftmaxModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.linear.SoftmaxModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def hasInterceptItem(self): return self.hasInterceptItem() def getFeatureNames(self): return self.getFeatureNames() def getVectorColName(self): return self.getVectorColName() def getVectorSize(self): return self.getVectorSize() def getModelName(self): return self.getModelName() def getWeights(self): return self.getWeights() def getLabelValues(self): return self.getLabelValues() class TreeModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getFeatureImportance(self): return self.getFeatureImportance() def getNumTrees(self): return self.getNumTrees() def getFeatures(self): return self.getFeatures() def getCategoricalFeatures(self): return self.getCategoricalFeatures() def getCategoricalValues(self, categoricalCol): return self.getCategoricalValues(categoricalCol) 
def getLabels(self): return self.getLabels() class MultiTreeModelInfo(TreeModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo$MultiTreeModelInfo' def __init__(self, j_obj): super(MultiTreeModelInfo, self).__init__(j_obj) def getCaseWhenRule(self, treeId): return self.getCaseWhenRule(treeId) def saveTreeAsImage(self, path, treeId, isOverwrite): self.saveTreeAsImage(path, treeId, isOverwrite) if in_ipython(): from IPython import display # noinspection PyTypeChecker display.display(display.Image(path)) class DecisionTreeModelInfo(TreeModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo$DecisionTreeModelInfo' def __init__(self, j_obj): super(DecisionTreeModelInfo, self).__init__(j_obj) def getCaseWhenRule(self): return self.getCaseWhenRule() def saveTreeAsImage(self, path, isOverwrite): self.saveTreeAsImage(path, isOverwrite) if in_ipython(): from IPython import display # noinspection PyTypeChecker display.display(display.Image(path)) class RandomForestModelInfo(MultiTreeModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo$RandomForestModelInfo' def __init__(self, j_obj): super(RandomForestModelInfo, self).__init__(j_obj) class GbdtModelInfo(MultiTreeModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo$GbdtModelInfo' def __init__(self, j_obj): super(GbdtModelInfo, self).__init__(j_obj) class NaiveBayesModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.batch.classification.NaiveBayesModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getFeatureNames(self): return self.getFeatureNames() def getCategoryFeatureInfo(self): return self.getCategoryFeatureInfo() def getGaussFeatureInfo(self): return self.getGaussFeatureInfo() def getLabelList(self): return self.getLabelList() def getLabelProportion(self): return self.getLabelProportion() def getCategoryInfo(self): return 
self.getCategoryInfo() class ImputerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.ImputerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def fillValue(self): return self.fillValue() def getFillValues(self): return self.getFillValues() def getStrategy(self): return self.getStrategy() class GlmModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.regression.glm.GlmModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getIntercept(self): return self.getIntercept() def getFeatureColNames(self): return self.getFeatureColNames() def getLabelColName(self): return self.getLabelColName() def getVariancePower(self): return self.getVariancePower() def getLink(self): return self.getLink() def getLinkPower(self): return self.getLinkPower() def getCoefficients(self): return self.getCoefficients() def isFitIntercept(self): return self.isFitIntercept() def getDegreeOfFreedom(self): return self.getDegreeOfFreedom() def getResidualDegreeOfFreeDom(self): return self.getResidualDegreeOfFreeDom() def getResidualDegreeOfFreedomNull(self): return self.getResidualDegreeOfFreedomNull() def getAic(self): return self.getAic() def getDispersion(self): return self.getDispersion() def getDeviance(self): return self.getDeviance() def getNullDeviance(self): return self.getNullDeviance() def getTValues(self): return self.getTValues() def getPValues(self): return self.getPValues() def getStdErrors(self): return self.getStdErrors() def getFamily(self): return self.getFamily()
from .bases.j_obj_wrapper import JavaObjectWrapperWithAutoTypeConversion from ..utils.packages import in_ipython class ClusteringModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.clustering.ClusteringModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getClusterCenter(self, clusterId): return self.getClusterCenter(clusterId) def getClusterNumber(self): return self.getClusterNumber() class KMeansModelInfo(ClusteringModelInfo): _j_cls_name = 'com.alibaba.alink.operator.batch.clustering.KMeansModelInfoBatchOp$KMeansModelInfo' def __init__(self, j_obj): super(KMeansModelInfo, self).__init__(j_obj) def getClusterWeight(self, clusterId): return self.getClusterWeight(clusterId) class BisectingKMeansModelInfo(ClusteringModelInfo): _j_cls_name = 'com.alibaba.alink.operator.batch.clustering.BisectingKMeansModelInfoBatchOp$BisectingKMeansModelInfo' def __init__(self, j_obj): super(BisectingKMeansModelInfo, self).__init__(j_obj) class GmmModelInfo(ClusteringModelInfo): _j_cls_name = 'com.alibaba.alink.operator.batch.clustering.GmmModelInfoBatchOp$GmmModelInfo' def __init__(self, j_obj): super(GmmModelInfo, self).__init__(j_obj) def getClusterCovarianceMatrix(self, clusterId): return self.getClusterCovarianceMatrix(clusterId) class OneHotModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.feature.OneHotModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getSelectedColsInModel(self): return self.getSelectedColsInModel() def getDistinctTokenNumber(self, columnName): return self.getDistinctTokenNumber(columnName) def getTokens(self, columnName): return self.getTokens(columnName) class QuantileDiscretizerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.feature.QuantileDiscretizerModelInfo' def __init__(self, j_obj): self._j_obj = 
j_obj def get_j_obj(self): return self._j_obj def getSelectedColsInModel(self): return self.getSelectedColsInModel() def getCutsArray(self, columnName): return self.getCutsArray(columnName) _unsupported_j_methods = ['mapToString'] class NaiveBayesTextModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.batch.classification.NaiveBayesTextModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getVectorColName(self): return self.getVectorColName() def getModelType(self): return self.getModelType() def getLabelList(self): return self.getLabelList() def getPriorProbability(self): return self.getPriorProbability() def getFeatureProbability(self): return self.getFeatureProbability() _unsupported_j_methods = ['generateLabelProportionTable'] class LdaModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.batch.clustering.LdaModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getLogPerplexity(self): return self.getLogPerplexity() def getLogLikelihood(self): return self.getLogLikelihood() def getTopicNum(self): return self.getTopicNum() def getVocabularySize(self): return self.getVocabularySize() class MaxAbsScalarModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.MaxAbsScalarModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getMaxAbs(self): return self.getMaxAbs() class MinMaxScalerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.MinMaxScalerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getMaxs(self): return self.getMaxs() def getMins(self): return self.getMins() class StandardScalerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 
'com.alibaba.alink.operator.common.dataproc.StandardScalerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getStdDevs(self): return self.getStdDevs() def getMeans(self): return self.getMeans() def isWithMeans(self): return self.isWithMeans() def isWithStdDevs(self): return self.isWithStdDevs() class VectorMaxAbsScalarModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.vector.VectorMaxAbsScalarModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getMaxAbs(self): return self.getMaxAbs() class VectorMinMaxScalerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.vector.VectorMinMaxScalerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getMaxs(self): return self.getMaxs() def getMins(self): return self.getMins() class VectorStandardScalerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.vector.VectorStandardScalerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getStdDevs(self): return self.getStdDevs() def getMeans(self): return self.getMeans() def isWithMeans(self): return self.isWithMeans() def isWithStdDevs(self): return self.isWithStdDevs() class EqualWidthDiscretizerModelInfo(QuantileDiscretizerModelInfo): _j_cls_name = 'com.alibaba.alink.operator.batch.feature.EqualWidthDiscretizerModelInfoBatchOp$EqualWidthDiscretizerModelInfo' def __init__(self, j_obj): super(EqualWidthDiscretizerModelInfo, self).__init__(j_obj) def get_j_obj(self): return self._j_obj _unsupported_j_methods = ['mapToString'] class ChisqSelectorModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.feature.ChisqSelectorModelInfo' def __init__(self, j_obj): self._j_obj = 
j_obj def get_j_obj(self): return self._j_obj def chisq(self, arg0): return self.chisq(arg0) def pValue(self, arg0): return self.pValue(arg0) def getSelectorType(self): return self.getSelectorType() def getNumTopFeatures(self): return self.getNumTopFeatures() def getPercentile(self): return self.getPercentile() def getFpr(self): return self.getFpr() def getFdr(self): return self.getFdr() def getFwe(self): return self.getFwe() def getSelectorNum(self): return self.getSelectorNum() def getColNames(self): return self.getColNames() def getSiftOutColNames(self): return self.getSiftOutColNames() class PcaModelData(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.feature.pca.PcaModelData' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getCols(self): return self.getCols() def getEigenValues(self): return self.getEigenValues() def getEigenVectors(self): return self.getEigenVectors() def getProportions(self): return self.getProportions() def getCumulatives(self): return self.getCumulatives() class FmRegressorModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.fm.FmRegressorModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def hasIntercept(self): return self.hasIntercept() def hasLinearItem(self): return self.hasLinearItem() def getNumFactor(self): return self.getNumFactor() def getTask(self): return self.getTask() def getNumFeature(self): return self.getNumFeature() def getFactors(self): return self.getFactors() def getFeatureColNames(self): return self.getFeatureColNames() class FmClassifierModelInfo(FmRegressorModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.fm.FmClassifierModelInfo' def __init__(self, j_obj): super(FmClassifierModelInfo, self).__init__(j_obj) def getLabelValues(self): return self.getLabelValues() class 
LinearRegressorModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.linear.LinearRegressorModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def hasInterceptItem(self): return self.hasInterceptItem() def getFeatureNames(self): return self.getFeatureNames() def getVectorColName(self): return self.getVectorColName() def getWeight(self): return self.getWeight() def getVectorSize(self): return self.getVectorSize() def getModelName(self): return self.getModelName() class LinearClassifierModelInfo(LinearRegressorModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.linear.LinearClassifierModelInfo' def __init__(self, j_obj): super(LinearClassifierModelInfo, self).__init__(j_obj) def getLabelValues(self): return self.getLabelValues() class SoftmaxModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.linear.SoftmaxModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def hasInterceptItem(self): return self.hasInterceptItem() def getFeatureNames(self): return self.getFeatureNames() def getVectorColName(self): return self.getVectorColName() def getVectorSize(self): return self.getVectorSize() def getModelName(self): return self.getModelName() def getWeights(self): return self.getWeights() def getLabelValues(self): return self.getLabelValues() class TreeModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getFeatureImportance(self): return self.getFeatureImportance() def getNumTrees(self): return self.getNumTrees() def getFeatures(self): return self.getFeatures() def getCategoricalFeatures(self): return self.getCategoricalFeatures() def getCategoricalValues(self, categoricalCol): return self.getCategoricalValues(categoricalCol) 
def getLabels(self): return self.getLabels() class MultiTreeModelInfo(TreeModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo$MultiTreeModelInfo' def __init__(self, j_obj): super(MultiTreeModelInfo, self).__init__(j_obj) def getCaseWhenRule(self, treeId): return self.getCaseWhenRule(treeId) def saveTreeAsImage(self, path, treeId, isOverwrite): self.saveTreeAsImage(path, treeId, isOverwrite) if in_ipython(): from IPython import display # noinspection PyTypeChecker display.display(display.Image(path)) class DecisionTreeModelInfo(TreeModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo$DecisionTreeModelInfo' def __init__(self, j_obj): super(DecisionTreeModelInfo, self).__init__(j_obj) def getCaseWhenRule(self): return self.getCaseWhenRule() def saveTreeAsImage(self, path, isOverwrite): self.saveTreeAsImage(path, isOverwrite) if in_ipython(): from IPython import display # noinspection PyTypeChecker display.display(display.Image(path)) class RandomForestModelInfo(MultiTreeModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo$RandomForestModelInfo' def __init__(self, j_obj): super(RandomForestModelInfo, self).__init__(j_obj) class GbdtModelInfo(MultiTreeModelInfo): _j_cls_name = 'com.alibaba.alink.operator.common.tree.TreeModelInfo$GbdtModelInfo' def __init__(self, j_obj): super(GbdtModelInfo, self).__init__(j_obj) class NaiveBayesModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.batch.classification.NaiveBayesModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getFeatureNames(self): return self.getFeatureNames() def getCategoryFeatureInfo(self): return self.getCategoryFeatureInfo() def getGaussFeatureInfo(self): return self.getGaussFeatureInfo() def getLabelList(self): return self.getLabelList() def getLabelProportion(self): return self.getLabelProportion() def getCategoryInfo(self): return 
self.getCategoryInfo() class ImputerModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.dataproc.ImputerModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def fillValue(self): return self.fillValue() def getFillValues(self): return self.getFillValues() def getStrategy(self): return self.getStrategy() class GlmModelInfo(JavaObjectWrapperWithAutoTypeConversion): _j_cls_name = 'com.alibaba.alink.operator.common.regression.glm.GlmModelInfo' def __init__(self, j_obj): self._j_obj = j_obj def get_j_obj(self): return self._j_obj def getIntercept(self): return self.getIntercept() def getFeatureColNames(self): return self.getFeatureColNames() def getLabelColName(self): return self.getLabelColName() def getVariancePower(self): return self.getVariancePower() def getLink(self): return self.getLink() def getLinkPower(self): return self.getLinkPower() def getCoefficients(self): return self.getCoefficients() def isFitIntercept(self): return self.isFitIntercept() def getDegreeOfFreedom(self): return self.getDegreeOfFreedom() def getResidualDegreeOfFreeDom(self): return self.getResidualDegreeOfFreeDom() def getResidualDegreeOfFreedomNull(self): return self.getResidualDegreeOfFreedomNull() def getAic(self): return self.getAic() def getDispersion(self): return self.getDispersion() def getDeviance(self): return self.getDeviance() def getNullDeviance(self): return self.getNullDeviance() def getTValues(self): return self.getTValues() def getPValues(self): return self.getPValues() def getStdErrors(self): return self.getStdErrors() def getFamily(self): return self.getFamily()
en
0.215665
# noinspection PyTypeChecker # noinspection PyTypeChecker
2.227759
2
examples/manual/demo4b.py
eLBati/pyxb
123
6629816
# examples/manual/demo4b.py from __future__ import print_function import address addr = address.USAddress('<NAME>', '8 Oak Avenue', 'Anytown', 'AK', 12341) print(addr.toxml("utf-8", element_name='USAddress').decode('utf-8'))
# examples/manual/demo4b.py from __future__ import print_function import address addr = address.USAddress('<NAME>', '8 Oak Avenue', 'Anytown', 'AK', 12341) print(addr.toxml("utf-8", element_name='USAddress').decode('utf-8'))
es
0.272109
# examples/manual/demo4b.py
2.635158
3
irods_consortium_continuous_integration_test_hook.py
alanking/irods_resource_plugin_s3
0
6629817
<filename>irods_consortium_continuous_integration_test_hook.py from __future__ import print_function import optparse import os import shutil import glob import time import random import string import subprocess import irods_python_ci_utilities def get_build_prerequisites_apt(): return[] def get_build_prerequisites_yum(): return[] def get_build_prerequisites_zypper(): return[] def get_build_prerequisites(): dispatch_map = { 'Ubuntu': get_build_prerequisites_apt, 'Centos': get_build_prerequisites_yum, 'Centos linux': get_build_prerequisites_yum } try: return dispatch_map[irods_python_ci_utilities.get_distribution()]() except KeyError: irods_python_ci_utilities.raise_not_implemented_for_distribution() def install_build_prerequisites(): irods_python_ci_utilities.subprocess_get_output(['sudo', 'pip', 'install', 'boto3', '--upgrade'], check_rc=True) irods_python_ci_utilities.subprocess_get_output(['sudo', 'pip', 'install', 'minio==6.0.2', '--upgrade'], check_rc=True) irods_python_ci_utilities.subprocess_get_output(['sudo', '-EH', 'pip', 'install', 'unittest-xml-reporting==1.14.0']) if irods_python_ci_utilities.get_distribution() == 'Ubuntu': # cmake from externals requires newer libstdc++ on ub12 if irods_python_ci_utilities.get_distribution_version_major() == '12': irods_python_ci_utilities.install_os_packages(['python-software-properties']) irods_python_ci_utilities.subprocess_get_output(['sudo', 'add-apt-repository', '-y', 'ppa:ubuntu-toolchain-r/test'], check_rc=True) irods_python_ci_utilities.install_os_packages(['libstdc++6']) #irods_python_ci_utilities.install_os_packages(get_build_prerequisites()) def download_and_start_minio_server(): subprocess.check_output(['wget', 'https://dl.min.io/server/minio/release/linux-amd64/minio']) subprocess.check_output(['chmod', '+x', 'minio']) access_key = ''.join(random.choice(string.letters) for i in xrange(10)) secret_key = ''.join(random.choice(string.letters) for i in xrange(10)) with open('/var/lib/irods/minio.keypair', 
'w') as f: f.write('%s\n' % access_key) f.write('%s\n' % secret_key) os.environ['MINIO_ACCESS_KEY'] = access_key os.environ['MINIO_SECRET_KEY'] = secret_key proc1 = subprocess.Popen(['./minio', 'server', '/data']) os.environ['MINIO_REGION_NAME'] = 'eu-central-1' proc2 = subprocess.Popen(['./minio', 'server', '--address', ':9001', '/data2']) return (proc1, proc2) def main(): parser = optparse.OptionParser() parser.add_option('--output_root_directory') parser.add_option('--built_packages_root_directory') options, _ = parser.parse_args() output_root_directory = options.output_root_directory built_packages_root_directory = options.built_packages_root_directory package_suffix = irods_python_ci_utilities.get_package_suffix() os_specific_directory = irods_python_ci_utilities.append_os_specific_directory(built_packages_root_directory) irods_python_ci_utilities.install_os_packages_from_files(glob.glob(os.path.join(os_specific_directory, 'irods-resource-plugin-s3*.{0}'.format(package_suffix)))) install_build_prerequisites() minio_processes = download_and_start_minio_server() time.sleep(10) try: test_output_file = 'log/test_output.log' irods_python_ci_utilities.subprocess_get_output(['sudo', 'su', '-', 'irods', '-c', 'python2 scripts/run_tests.py --xml_output --run_s test_irods_resource_plugin_s3_minio 2>&1 | tee {0}; exit $PIPESTATUS'.format(test_output_file)], check_rc=True) minio_processes[0].terminate() minio_processes[1].terminate() finally: if output_root_directory: irods_python_ci_utilities.gather_files_satisfying_predicate('/var/lib/irods/log', output_root_directory, lambda x: True) shutil.copy('/var/lib/irods/log/test_output.log', output_root_directory) shutil.copytree('/var/lib/irods/test-reports', os.path.join(output_root_directory, 'test-reports')) if __name__ == '__main__': main()
<filename>irods_consortium_continuous_integration_test_hook.py from __future__ import print_function import optparse import os import shutil import glob import time import random import string import subprocess import irods_python_ci_utilities def get_build_prerequisites_apt(): return[] def get_build_prerequisites_yum(): return[] def get_build_prerequisites_zypper(): return[] def get_build_prerequisites(): dispatch_map = { 'Ubuntu': get_build_prerequisites_apt, 'Centos': get_build_prerequisites_yum, 'Centos linux': get_build_prerequisites_yum } try: return dispatch_map[irods_python_ci_utilities.get_distribution()]() except KeyError: irods_python_ci_utilities.raise_not_implemented_for_distribution() def install_build_prerequisites(): irods_python_ci_utilities.subprocess_get_output(['sudo', 'pip', 'install', 'boto3', '--upgrade'], check_rc=True) irods_python_ci_utilities.subprocess_get_output(['sudo', 'pip', 'install', 'minio==6.0.2', '--upgrade'], check_rc=True) irods_python_ci_utilities.subprocess_get_output(['sudo', '-EH', 'pip', 'install', 'unittest-xml-reporting==1.14.0']) if irods_python_ci_utilities.get_distribution() == 'Ubuntu': # cmake from externals requires newer libstdc++ on ub12 if irods_python_ci_utilities.get_distribution_version_major() == '12': irods_python_ci_utilities.install_os_packages(['python-software-properties']) irods_python_ci_utilities.subprocess_get_output(['sudo', 'add-apt-repository', '-y', 'ppa:ubuntu-toolchain-r/test'], check_rc=True) irods_python_ci_utilities.install_os_packages(['libstdc++6']) #irods_python_ci_utilities.install_os_packages(get_build_prerequisites()) def download_and_start_minio_server(): subprocess.check_output(['wget', 'https://dl.min.io/server/minio/release/linux-amd64/minio']) subprocess.check_output(['chmod', '+x', 'minio']) access_key = ''.join(random.choice(string.letters) for i in xrange(10)) secret_key = ''.join(random.choice(string.letters) for i in xrange(10)) with open('/var/lib/irods/minio.keypair', 
'w') as f: f.write('%s\n' % access_key) f.write('%s\n' % secret_key) os.environ['MINIO_ACCESS_KEY'] = access_key os.environ['MINIO_SECRET_KEY'] = secret_key proc1 = subprocess.Popen(['./minio', 'server', '/data']) os.environ['MINIO_REGION_NAME'] = 'eu-central-1' proc2 = subprocess.Popen(['./minio', 'server', '--address', ':9001', '/data2']) return (proc1, proc2) def main(): parser = optparse.OptionParser() parser.add_option('--output_root_directory') parser.add_option('--built_packages_root_directory') options, _ = parser.parse_args() output_root_directory = options.output_root_directory built_packages_root_directory = options.built_packages_root_directory package_suffix = irods_python_ci_utilities.get_package_suffix() os_specific_directory = irods_python_ci_utilities.append_os_specific_directory(built_packages_root_directory) irods_python_ci_utilities.install_os_packages_from_files(glob.glob(os.path.join(os_specific_directory, 'irods-resource-plugin-s3*.{0}'.format(package_suffix)))) install_build_prerequisites() minio_processes = download_and_start_minio_server() time.sleep(10) try: test_output_file = 'log/test_output.log' irods_python_ci_utilities.subprocess_get_output(['sudo', 'su', '-', 'irods', '-c', 'python2 scripts/run_tests.py --xml_output --run_s test_irods_resource_plugin_s3_minio 2>&1 | tee {0}; exit $PIPESTATUS'.format(test_output_file)], check_rc=True) minio_processes[0].terminate() minio_processes[1].terminate() finally: if output_root_directory: irods_python_ci_utilities.gather_files_satisfying_predicate('/var/lib/irods/log', output_root_directory, lambda x: True) shutil.copy('/var/lib/irods/log/test_output.log', output_root_directory) shutil.copytree('/var/lib/irods/test-reports', os.path.join(output_root_directory, 'test-reports')) if __name__ == '__main__': main()
en
0.593666
# cmake from externals requires newer libstdc++ on ub12 #irods_python_ci_utilities.install_os_packages(get_build_prerequisites())
1.898539
2
calibration/WeightSensor-calibration.py
TyBeeProject/TyBeeHive
2
6629818
<reponame>TyBeeProject/TyBeeHive<filename>calibration/WeightSensor-calibration.py #This script permit to calibrate weight sensors
#This script permit to calibrate weight sensors
en
0.623761
#This script permit to calibrate weight sensors
1.18304
1
Week7/ex7_1.py
North-Guard/BigToolsComplicatedData
0
6629819
from mrjob.job import MRJob class MRWordCount(MRJob): def mapper(self, key, line): yield "words", len(line.split()) def reducer(self, key, values): yield key, sum(values) if __name__ == "__main__": MRWordCount.run()
from mrjob.job import MRJob class MRWordCount(MRJob): def mapper(self, key, line): yield "words", len(line.split()) def reducer(self, key, values): yield key, sum(values) if __name__ == "__main__": MRWordCount.run()
none
1
2.790516
3
backend/api/avrae/cogs5e/sheets/abc.py
XoriensLair/XoriensLair.github.io
0
6629820
<reponame>XoriensLair/XoriensLair.github.io class SheetLoaderABC: def __init__(self, url): self.url = url self.character_data = None async def load_character(self, owner_id: str, args): raise NotImplemented # gsheet # v3: added stat cvars # v4: consumables # v5: spellbook # v6: v2.0 support (level vars, resistances, extra spells/attacks) # v7: race/background (experimental) # v8: skill/save effects # v15: version fix # dicecloud # v6: added stat cvars # v7: added check effects (adv/dis) # v8: consumables # v9: spellbook # v10: live tracking # v11: save effects (adv/dis) # v12: add cached dicecloud spell list id # v13: added nonstrict spells # v14: added race, background (for experimental purposes only) # v15: migrated to new sheet system # beyond # v1: initial implementation # v2: added race/background for research purposes # v15: standardize sheet import versions # all # v16: explicit spellcasting mod import # v17: refactor to use StatBlock, AttackList SHEET_VERSION = 17
class SheetLoaderABC: def __init__(self, url): self.url = url self.character_data = None async def load_character(self, owner_id: str, args): raise NotImplemented # gsheet # v3: added stat cvars # v4: consumables # v5: spellbook # v6: v2.0 support (level vars, resistances, extra spells/attacks) # v7: race/background (experimental) # v8: skill/save effects # v15: version fix # dicecloud # v6: added stat cvars # v7: added check effects (adv/dis) # v8: consumables # v9: spellbook # v10: live tracking # v11: save effects (adv/dis) # v12: add cached dicecloud spell list id # v13: added nonstrict spells # v14: added race, background (for experimental purposes only) # v15: migrated to new sheet system # beyond # v1: initial implementation # v2: added race/background for research purposes # v15: standardize sheet import versions # all # v16: explicit spellcasting mod import # v17: refactor to use StatBlock, AttackList SHEET_VERSION = 17
en
0.688145
# gsheet # v3: added stat cvars # v4: consumables # v5: spellbook # v6: v2.0 support (level vars, resistances, extra spells/attacks) # v7: race/background (experimental) # v8: skill/save effects # v15: version fix # dicecloud # v6: added stat cvars # v7: added check effects (adv/dis) # v8: consumables # v9: spellbook # v10: live tracking # v11: save effects (adv/dis) # v12: add cached dicecloud spell list id # v13: added nonstrict spells # v14: added race, background (for experimental purposes only) # v15: migrated to new sheet system # beyond # v1: initial implementation # v2: added race/background for research purposes # v15: standardize sheet import versions # all # v16: explicit spellcasting mod import # v17: refactor to use StatBlock, AttackList
1.996106
2
sfeprapy/mcs0/__init__.py
fsepy/sfeprapy
4
6629821
<reponame>fsepy/sfeprapy<gh_stars>1-10 # -*- coding: utf-8 -*- import pandas as pd from sfeprapy.func.mcs_gen import dict_flatten def __example_config_dict(): return dict(n_threads=2, cwd='') def __example_input_dict(): y = { "Standard Case 1": dict( case_name="Standard Case 1", n_simulations=1000, fire_time_step=10, fire_time_duration=18000, fire_hrr_density=dict(dist="uniform_", lbound=0.25 - 0.001, ubound=0.25 + 0.001), fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1500, mean=420, sd=126), fire_spread_speed=dict(dist="uniform_", lbound=0.0035, ubound=0.0190), fire_nft_limit=dict(dist="norm_", lbound=623.15, ubound=2023.15, mean=1323.15, sd=93), fire_combustion_efficiency=dict(dist="uniform_", lbound=0.8, ubound=1.0), window_open_fraction=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2), phi_teq=dict(dist="constant_", ubound=1, lbound=1, mean=0, sd=0), beam_cross_section_area=0.017, beam_position_horizontal=dict(dist="uniform_", lbound=0.6 * 31.25, ubound=0.9 * 31.25), beam_position_vertical=3.2, beam_rho=7850, fire_mode=3, fire_gamma_fi_q=1, fire_t_alpha=300, fire_tlim=0.333, protection_c=1700, protection_k=0.2, protection_protected_perimeter=2.14, protection_rho=800, room_breadth=16, room_depth=31.25, room_height=3.3, room_wall_thermal_inertia=720, solver_temperature_goal=893.15, solver_max_iter=20, solver_thickness_lbound=0.0001, solver_thickness_ubound=0.0300, solver_tol=1.0, window_height=2.8, window_width=72, window_open_fraction_permanent=0, timber_exposed_area=0, timber_charring_rate=0.7, # mm/min timber_hc=13.2, # MJ/kg timber_density=400, # [kg/m3] timber_solver_ilim=20, timber_solver_tol=1, p1=3e-7, p2=0.1, p3=0.25, p4=0.09, general_room_floor_area=500, ), "Standard Case 2 (with teq_phi)": dict( case_name="Standard Case 2 (with teq_phi)", n_simulations=1000, fire_time_step=10, fire_time_duration=18000, fire_hrr_density=dict(dist="uniform_", lbound=0.25 - 0.001, ubound=0.25 + 0.001), 
fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1500, mean=420, sd=126), fire_spread_speed=dict(dist="uniform_", lbound=0.0035, ubound=0.0190), fire_nft_limit=dict(dist="norm_", lbound=623.15, ubound=2023.15, mean=1323.15, sd=93), fire_combustion_efficiency=dict(dist="uniform_", lbound=0.8, ubound=1.0), window_open_fraction=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2), phi_teq=dict(dist="lognorm_", ubound=3, lbound=0.00001, mean=1, sd=0.25), beam_cross_section_area=0.017, beam_position_horizontal=dict(dist="uniform_", lbound=0.6 * 31.25, ubound=0.9 * 31.25), beam_position_vertical=3.2, beam_rho=7850, fire_mode=3, fire_gamma_fi_q=1, fire_t_alpha=300, fire_tlim=0.333, protection_c=1700, protection_k=0.2, protection_protected_perimeter=2.14, protection_rho=800, room_breadth=16, room_depth=31.25, room_height=3.3, room_wall_thermal_inertia=720, solver_temperature_goal=893.15, solver_max_iter=20, solver_thickness_lbound=0.0001, solver_thickness_ubound=0.0300, solver_tol=1.0, window_height=2.8, window_width=72, window_open_fraction_permanent=0, timber_exposed_area=0, timber_charring_rate=0.7, # [mm/min] timber_hc=13.2, # [MJ/kg] timber_density=400, # [kg/m3] timber_solver_ilim=20, timber_solver_tol=1, p1=3e-7, p2=0.1, p3=0.25, p4=0.09, general_room_floor_area=500, ), "Standard Case 3 (with timber)": dict( case_name="Standard Case 3 (with timber)", n_simulations=1000, fire_time_step=10, fire_time_duration=18000, fire_hrr_density=dict(dist="uniform_", lbound=0.25 - 0.001, ubound=0.25 + 0.001), fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1500, mean=420, sd=126), fire_spread_speed=dict(dist="uniform_", lbound=0.0035, ubound=0.0190), fire_nft_limit=dict(dist="norm_", lbound=623.15, ubound=2023.15, mean=1323.15, sd=93), fire_combustion_efficiency=dict(dist="uniform_", lbound=0.8, ubound=1.0), window_open_fraction=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2), phi_teq=dict(dist="constant_", 
ubound=1, lbound=1, mean=0, sd=0), beam_cross_section_area=0.017, beam_position_horizontal=dict(dist="uniform_", lbound=0.6 * 31.25, ubound=0.9 * 31.25), beam_position_vertical=3.2, beam_rho=7850, fire_mode=3, fire_gamma_fi_q=1, fire_t_alpha=300, fire_tlim=0.333, protection_c=1700, protection_k=0.2, protection_protected_perimeter=2.14, protection_rho=800, room_breadth=16, room_depth=31.25, room_height=3.3, room_wall_thermal_inertia=720, solver_temperature_goal=893.15, solver_max_iter=20, solver_thickness_lbound=0.0001, solver_thickness_ubound=0.0300, solver_tol=1.0, window_height=2.8, window_width=72, window_open_fraction_permanent=0, timber_exposed_area=500, timber_charring_rate=0.7, # mm/min timber_hc=13.2, # MJ/kg timber_density=400, # [kg/m3] timber_solver_ilim=20, timber_solver_tol=1, p1=3e-7, p2=0.1, p3=0.25, p4=0.09, general_room_floor_area=500, ), } return y def __example_input_csv(x: dict): y = {k: dict_flatten(v) for k, v in x.items()} y = pd.DataFrame.from_dict(y, orient="columns") y.index.name = "PARAMETERS" y = y.to_csv(index=True, line_terminator='\n') return y def __example_input_df(x: dict) -> pd.DataFrame: y = {k: dict_flatten(v) for k, v in x.items()} y = pd.DataFrame.from_dict(y, orient="columns") y.index.name = "PARAMETERS" return y EXAMPLE_CONFIG_DICT = __example_config_dict() EXAMPLE_INPUT_DICT = __example_input_dict() EXAMPLE_INPUT_CSV = __example_input_csv(__example_input_dict()) EXAMPLE_INPUT_DF = __example_input_df(__example_input_dict()) if __name__ == "__main__": print(EXAMPLE_CONFIG_DICT, "\n") print(EXAMPLE_INPUT_DICT, "\n") print(EXAMPLE_INPUT_CSV, "\n")
# -*- coding: utf-8 -*- import pandas as pd from sfeprapy.func.mcs_gen import dict_flatten def __example_config_dict(): return dict(n_threads=2, cwd='') def __example_input_dict(): y = { "Standard Case 1": dict( case_name="Standard Case 1", n_simulations=1000, fire_time_step=10, fire_time_duration=18000, fire_hrr_density=dict(dist="uniform_", lbound=0.25 - 0.001, ubound=0.25 + 0.001), fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1500, mean=420, sd=126), fire_spread_speed=dict(dist="uniform_", lbound=0.0035, ubound=0.0190), fire_nft_limit=dict(dist="norm_", lbound=623.15, ubound=2023.15, mean=1323.15, sd=93), fire_combustion_efficiency=dict(dist="uniform_", lbound=0.8, ubound=1.0), window_open_fraction=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2), phi_teq=dict(dist="constant_", ubound=1, lbound=1, mean=0, sd=0), beam_cross_section_area=0.017, beam_position_horizontal=dict(dist="uniform_", lbound=0.6 * 31.25, ubound=0.9 * 31.25), beam_position_vertical=3.2, beam_rho=7850, fire_mode=3, fire_gamma_fi_q=1, fire_t_alpha=300, fire_tlim=0.333, protection_c=1700, protection_k=0.2, protection_protected_perimeter=2.14, protection_rho=800, room_breadth=16, room_depth=31.25, room_height=3.3, room_wall_thermal_inertia=720, solver_temperature_goal=893.15, solver_max_iter=20, solver_thickness_lbound=0.0001, solver_thickness_ubound=0.0300, solver_tol=1.0, window_height=2.8, window_width=72, window_open_fraction_permanent=0, timber_exposed_area=0, timber_charring_rate=0.7, # mm/min timber_hc=13.2, # MJ/kg timber_density=400, # [kg/m3] timber_solver_ilim=20, timber_solver_tol=1, p1=3e-7, p2=0.1, p3=0.25, p4=0.09, general_room_floor_area=500, ), "Standard Case 2 (with teq_phi)": dict( case_name="Standard Case 2 (with teq_phi)", n_simulations=1000, fire_time_step=10, fire_time_duration=18000, fire_hrr_density=dict(dist="uniform_", lbound=0.25 - 0.001, ubound=0.25 + 0.001), fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1500, 
mean=420, sd=126), fire_spread_speed=dict(dist="uniform_", lbound=0.0035, ubound=0.0190), fire_nft_limit=dict(dist="norm_", lbound=623.15, ubound=2023.15, mean=1323.15, sd=93), fire_combustion_efficiency=dict(dist="uniform_", lbound=0.8, ubound=1.0), window_open_fraction=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2), phi_teq=dict(dist="lognorm_", ubound=3, lbound=0.00001, mean=1, sd=0.25), beam_cross_section_area=0.017, beam_position_horizontal=dict(dist="uniform_", lbound=0.6 * 31.25, ubound=0.9 * 31.25), beam_position_vertical=3.2, beam_rho=7850, fire_mode=3, fire_gamma_fi_q=1, fire_t_alpha=300, fire_tlim=0.333, protection_c=1700, protection_k=0.2, protection_protected_perimeter=2.14, protection_rho=800, room_breadth=16, room_depth=31.25, room_height=3.3, room_wall_thermal_inertia=720, solver_temperature_goal=893.15, solver_max_iter=20, solver_thickness_lbound=0.0001, solver_thickness_ubound=0.0300, solver_tol=1.0, window_height=2.8, window_width=72, window_open_fraction_permanent=0, timber_exposed_area=0, timber_charring_rate=0.7, # [mm/min] timber_hc=13.2, # [MJ/kg] timber_density=400, # [kg/m3] timber_solver_ilim=20, timber_solver_tol=1, p1=3e-7, p2=0.1, p3=0.25, p4=0.09, general_room_floor_area=500, ), "Standard Case 3 (with timber)": dict( case_name="Standard Case 3 (with timber)", n_simulations=1000, fire_time_step=10, fire_time_duration=18000, fire_hrr_density=dict(dist="uniform_", lbound=0.25 - 0.001, ubound=0.25 + 0.001), fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1500, mean=420, sd=126), fire_spread_speed=dict(dist="uniform_", lbound=0.0035, ubound=0.0190), fire_nft_limit=dict(dist="norm_", lbound=623.15, ubound=2023.15, mean=1323.15, sd=93), fire_combustion_efficiency=dict(dist="uniform_", lbound=0.8, ubound=1.0), window_open_fraction=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2), phi_teq=dict(dist="constant_", ubound=1, lbound=1, mean=0, sd=0), beam_cross_section_area=0.017, 
beam_position_horizontal=dict(dist="uniform_", lbound=0.6 * 31.25, ubound=0.9 * 31.25), beam_position_vertical=3.2, beam_rho=7850, fire_mode=3, fire_gamma_fi_q=1, fire_t_alpha=300, fire_tlim=0.333, protection_c=1700, protection_k=0.2, protection_protected_perimeter=2.14, protection_rho=800, room_breadth=16, room_depth=31.25, room_height=3.3, room_wall_thermal_inertia=720, solver_temperature_goal=893.15, solver_max_iter=20, solver_thickness_lbound=0.0001, solver_thickness_ubound=0.0300, solver_tol=1.0, window_height=2.8, window_width=72, window_open_fraction_permanent=0, timber_exposed_area=500, timber_charring_rate=0.7, # mm/min timber_hc=13.2, # MJ/kg timber_density=400, # [kg/m3] timber_solver_ilim=20, timber_solver_tol=1, p1=3e-7, p2=0.1, p3=0.25, p4=0.09, general_room_floor_area=500, ), } return y def __example_input_csv(x: dict): y = {k: dict_flatten(v) for k, v in x.items()} y = pd.DataFrame.from_dict(y, orient="columns") y.index.name = "PARAMETERS" y = y.to_csv(index=True, line_terminator='\n') return y def __example_input_df(x: dict) -> pd.DataFrame: y = {k: dict_flatten(v) for k, v in x.items()} y = pd.DataFrame.from_dict(y, orient="columns") y.index.name = "PARAMETERS" return y EXAMPLE_CONFIG_DICT = __example_config_dict() EXAMPLE_INPUT_DICT = __example_input_dict() EXAMPLE_INPUT_CSV = __example_input_csv(__example_input_dict()) EXAMPLE_INPUT_DF = __example_input_df(__example_input_dict()) if __name__ == "__main__": print(EXAMPLE_CONFIG_DICT, "\n") print(EXAMPLE_INPUT_DICT, "\n") print(EXAMPLE_INPUT_CSV, "\n")
en
0.555674
# -*- coding: utf-8 -*- # mm/min # MJ/kg # [kg/m3] # [mm/min] # [MJ/kg] # [kg/m3] # mm/min # MJ/kg # [kg/m3]
2.138483
2
tests/test_bindings.py
mr-rodgers/coil
0
6629822
<filename>tests/test_bindings.py<gh_stars>0 import asyncio from typing import Any, AsyncIterable, List, Tuple, cast import pytest from aiostream import pipe, stream from coil import bind from coil.protocols import Bindable, Bound, TwoWayBound from .conftest import Size, Window @pytest.mark.asyncio @pytest.mark.parametrize("num_values", [100]) async def test_bind_produces_result_stream( window: Window, num_values: int ) -> None: bound_value = bind((window, "size")) event_stream = stream.iterate(bound_value.events()) | pipe.take(num_values) await asyncio.sleep(0) sent_sizes = [] async def drain(s: AsyncIterable[Any]) -> List[Any]: return await stream.list(s) task = asyncio.create_task(drain(event_stream)) for i in range(num_values): perc = (i + 1) / num_values size = Size(perc * 1024, perc * 768) window.size = size sent_sizes.append(size) assert sent_sizes == [val["value"] for val in await task] @pytest.mark.asyncio @pytest.mark.parametrize("new_size", [Size(1920, 1080), Size(1280, 720)]) async def test_setting_value_from_two_way_bind( window: Window, new_size: Size ) -> None: assert window.size != new_size bound_value = bind((cast(Bindable, window), "size"), readonly=False) await bound_value.set(new_size) assert window.size == new_size @pytest.mark.asyncio @pytest.mark.parametrize( "bindargs", [ (pytest.lazy_fixture("box"), "value"), (pytest.lazy_fixture("window"), "size"), ], ) async def test_binding_types(bindargs: Tuple[Bindable, str]) -> None: assert isinstance(bind(bindargs), Bound) assert not isinstance(bind(bindargs), TwoWayBound) assert isinstance(bind(bindargs, readonly=True), Bound) assert not isinstance(bind(bindargs, readonly=True), TwoWayBound) assert isinstance(bind(bindargs, readonly=False), Bound) assert isinstance(bind(bindargs, readonly=False), TwoWayBound)
<filename>tests/test_bindings.py<gh_stars>0 import asyncio from typing import Any, AsyncIterable, List, Tuple, cast import pytest from aiostream import pipe, stream from coil import bind from coil.protocols import Bindable, Bound, TwoWayBound from .conftest import Size, Window @pytest.mark.asyncio @pytest.mark.parametrize("num_values", [100]) async def test_bind_produces_result_stream( window: Window, num_values: int ) -> None: bound_value = bind((window, "size")) event_stream = stream.iterate(bound_value.events()) | pipe.take(num_values) await asyncio.sleep(0) sent_sizes = [] async def drain(s: AsyncIterable[Any]) -> List[Any]: return await stream.list(s) task = asyncio.create_task(drain(event_stream)) for i in range(num_values): perc = (i + 1) / num_values size = Size(perc * 1024, perc * 768) window.size = size sent_sizes.append(size) assert sent_sizes == [val["value"] for val in await task] @pytest.mark.asyncio @pytest.mark.parametrize("new_size", [Size(1920, 1080), Size(1280, 720)]) async def test_setting_value_from_two_way_bind( window: Window, new_size: Size ) -> None: assert window.size != new_size bound_value = bind((cast(Bindable, window), "size"), readonly=False) await bound_value.set(new_size) assert window.size == new_size @pytest.mark.asyncio @pytest.mark.parametrize( "bindargs", [ (pytest.lazy_fixture("box"), "value"), (pytest.lazy_fixture("window"), "size"), ], ) async def test_binding_types(bindargs: Tuple[Bindable, str]) -> None: assert isinstance(bind(bindargs), Bound) assert not isinstance(bind(bindargs), TwoWayBound) assert isinstance(bind(bindargs, readonly=True), Bound) assert not isinstance(bind(bindargs, readonly=True), TwoWayBound) assert isinstance(bind(bindargs, readonly=False), Bound) assert isinstance(bind(bindargs, readonly=False), TwoWayBound)
none
1
2.161466
2
utils/files_to_constants.py
yaqwsx/CppLink
0
6629823
#! /usr/bin/env python import sys import ntpath escape = [('\\', '\\\\'), ('\'', '\\\''), ('"', '\\"'), ('?', '\\?'), ('\n', '\\n'), ('\r', ''), ('#pragma once', '')] if len(sys.argv) < 3: print("Invalid usage! Please specify output file and source files") sys.exit(1) with open(sys.argv[1] + ".h", "w") as header, open(sys.argv[1] + ".cpp", "w") as source: header.write("#pragma once\n") source.write("#include \"{0}.h\"\n".format(sys.argv[1])) for item in sys.argv[2:]: with open(item) as f: content = f.readlines() item = ntpath.basename(item).upper().replace(".", "_") header.write("extern const char* {0};\n".format(item)) source.write("const char* {0} = \n".format(item)) for line in content: for pattern, replacement in escape: line = line.replace(pattern, replacement) source.write("\t\"{0}\"\n".format(line)) source.write("\t;\n\n")
#! /usr/bin/env python import sys import ntpath escape = [('\\', '\\\\'), ('\'', '\\\''), ('"', '\\"'), ('?', '\\?'), ('\n', '\\n'), ('\r', ''), ('#pragma once', '')] if len(sys.argv) < 3: print("Invalid usage! Please specify output file and source files") sys.exit(1) with open(sys.argv[1] + ".h", "w") as header, open(sys.argv[1] + ".cpp", "w") as source: header.write("#pragma once\n") source.write("#include \"{0}.h\"\n".format(sys.argv[1])) for item in sys.argv[2:]: with open(item) as f: content = f.readlines() item = ntpath.basename(item).upper().replace(".", "_") header.write("extern const char* {0};\n".format(item)) source.write("const char* {0} = \n".format(item)) for line in content: for pattern, replacement in escape: line = line.replace(pattern, replacement) source.write("\t\"{0}\"\n".format(line)) source.write("\t;\n\n")
ru
0.148623
#! /usr/bin/env python
2.607186
3
instrumentation/opentelemetry-instrumentation-pyramid/tests/pyramid_base_test.py
epsagon/opentelemetry-python-contrib
0
6629824
<gh_stars>0 # Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pyramid.httpexceptions as exc from pyramid.response import Response from werkzeug.test import Client from werkzeug.wrappers import BaseResponse class InstrumentationTest: @staticmethod def _hello_endpoint(request): helloid = int(request.matchdict["helloid"]) if helloid == 500: raise exc.HTTPInternalServerError() if helloid == 900: raise NotImplementedError() return Response("Hello: " + str(helloid)) @staticmethod def _custom_response_header_endpoint(request): headers = { "content-type": "text/plain; charset=utf-8", "content-length": "7", "my-custom-header": "my-custom-value-1,my-custom-header-2", "dont-capture-me": "test-value", } return Response("Testing", headers=headers) def _common_initialization(self, config): # pylint: disable=unused-argument def excluded_endpoint(request): return Response("excluded") # pylint: disable=unused-argument def excluded2_endpoint(request): return Response("excluded2") config.add_route("hello", "/hello/{helloid}") config.add_view(self._hello_endpoint, route_name="hello") config.add_route("excluded_arg", "/excluded/{helloid}") config.add_view(self._hello_endpoint, route_name="excluded_arg") config.add_route("excluded", "/excluded_noarg") config.add_view(excluded_endpoint, route_name="excluded") config.add_route("excluded2", "/excluded_noarg2") config.add_view(excluded2_endpoint, route_name="excluded2") config.add_route( "custom_response_headers", 
"/test_custom_response_headers" ) config.add_view( self._custom_response_header_endpoint, route_name="custom_response_headers", ) # pylint: disable=attribute-defined-outside-init self.client = Client(config.make_wsgi_app(), BaseResponse)
# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pyramid.httpexceptions as exc from pyramid.response import Response from werkzeug.test import Client from werkzeug.wrappers import BaseResponse class InstrumentationTest: @staticmethod def _hello_endpoint(request): helloid = int(request.matchdict["helloid"]) if helloid == 500: raise exc.HTTPInternalServerError() if helloid == 900: raise NotImplementedError() return Response("Hello: " + str(helloid)) @staticmethod def _custom_response_header_endpoint(request): headers = { "content-type": "text/plain; charset=utf-8", "content-length": "7", "my-custom-header": "my-custom-value-1,my-custom-header-2", "dont-capture-me": "test-value", } return Response("Testing", headers=headers) def _common_initialization(self, config): # pylint: disable=unused-argument def excluded_endpoint(request): return Response("excluded") # pylint: disable=unused-argument def excluded2_endpoint(request): return Response("excluded2") config.add_route("hello", "/hello/{helloid}") config.add_view(self._hello_endpoint, route_name="hello") config.add_route("excluded_arg", "/excluded/{helloid}") config.add_view(self._hello_endpoint, route_name="excluded_arg") config.add_route("excluded", "/excluded_noarg") config.add_view(excluded_endpoint, route_name="excluded") config.add_route("excluded2", "/excluded_noarg2") config.add_view(excluded2_endpoint, route_name="excluded2") config.add_route( "custom_response_headers", 
"/test_custom_response_headers" ) config.add_view( self._custom_response_header_endpoint, route_name="custom_response_headers", ) # pylint: disable=attribute-defined-outside-init self.client = Client(config.make_wsgi_app(), BaseResponse)
en
0.784373
# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=unused-argument # pylint: disable=unused-argument # pylint: disable=attribute-defined-outside-init
1.871557
2
Scripts/transform_xml_to_df.py
jiunsiew/NY_Philarchive_performanceHistory
0
6629825
# -*- coding: utf-8 -*- """ Created on Tue Apr 26 21:53:24 2016 @author: jiun Data out of xml and into a Postgres db. Largely based on composer_frequency.py """ #import modules from __future__ import division # from sys import argv import re # from collections import Counter # from sets import Set # import matplotlib.pyplot as plt import xml.etree.ElementTree as ET import os import os.path import pandas as pd os.chdir('/Users/jiun/Documents/analytics/NY_Philarchive_performanceHistory/Programs') #create xml collection of "docs" (i.e., programs in NYPhil's definition) tree = ET.parse('complete.xml') root = tree.getroot() concerts = root.findall('program') #convert season listing (e.g., 1842-43) to a single leading year (1842) def simplify_date(hyphenated_season): simple_season = re.sub(r'(\d{4})-\d{2}',r'\1',hyphenated_season) return simple_season def clean_date(date_string): tmp = re.search('^\d{4}-\d{2}-\d{2}', date_string) return date_string[tmp.start():tmp.end()] #get the composer's last name only from the worksComposerTitle elements def get_name(work): composer_name = re.sub(r'(.*?)(,| \[).*',r'\1',work) composer_name = re.sub(r"(.*)'(.*)",r"\1\\'\2",composer_name) return composer_name def extract_text(obj): if obj is None: return 'NA' else: return obj.text #gather info from XML file all_seasons = [] composers = [] current_season = '1842' total_works = 0 # create a long table with the following columns: # programID, date,Time, eventType, Location, Venue, composerName, workTitle, conductorName for c in concerts: # season = simplify_date(c.find('season').text) programID = c.find('programID').text # concert info concertInfo = c.find('concertInfo') date = clean_date(concertInfo.find('Date').text) time = concertInfo.find('Time').text eventType = concertInfo.find('eventType').text location = concertInfo.find('Location').text venue = concertInfo.find('Venue').text # work info # stopped here: need to handle the case where there are workInfo = c.find('worksInfo') for w 
in workInfo.findall('work'): composer = extract_text(w.find('composerName')) title = extract_text(w.find('workTitle')) conductor = extract_text(w.find('conductorName')) row = [programID, date, time, eventType, location, venue, composer, title] #, conductor] all_seasons.append(row) # convert to a data frame output_df = pd.DataFrame(all_seasons, columns = ['programID', 'date', 'Time', 'eventType', 'Location', 'Venue', 'composerName', 'workTitle']) output_df.to_csv('NY_Philharmonic_df.csv', sep = '|', encoding = 'utf-8')
# -*- coding: utf-8 -*- """ Created on Tue Apr 26 21:53:24 2016 @author: jiun Data out of xml and into a Postgres db. Largely based on composer_frequency.py """ #import modules from __future__ import division # from sys import argv import re # from collections import Counter # from sets import Set # import matplotlib.pyplot as plt import xml.etree.ElementTree as ET import os import os.path import pandas as pd os.chdir('/Users/jiun/Documents/analytics/NY_Philarchive_performanceHistory/Programs') #create xml collection of "docs" (i.e., programs in NYPhil's definition) tree = ET.parse('complete.xml') root = tree.getroot() concerts = root.findall('program') #convert season listing (e.g., 1842-43) to a single leading year (1842) def simplify_date(hyphenated_season): simple_season = re.sub(r'(\d{4})-\d{2}',r'\1',hyphenated_season) return simple_season def clean_date(date_string): tmp = re.search('^\d{4}-\d{2}-\d{2}', date_string) return date_string[tmp.start():tmp.end()] #get the composer's last name only from the worksComposerTitle elements def get_name(work): composer_name = re.sub(r'(.*?)(,| \[).*',r'\1',work) composer_name = re.sub(r"(.*)'(.*)",r"\1\\'\2",composer_name) return composer_name def extract_text(obj): if obj is None: return 'NA' else: return obj.text #gather info from XML file all_seasons = [] composers = [] current_season = '1842' total_works = 0 # create a long table with the following columns: # programID, date,Time, eventType, Location, Venue, composerName, workTitle, conductorName for c in concerts: # season = simplify_date(c.find('season').text) programID = c.find('programID').text # concert info concertInfo = c.find('concertInfo') date = clean_date(concertInfo.find('Date').text) time = concertInfo.find('Time').text eventType = concertInfo.find('eventType').text location = concertInfo.find('Location').text venue = concertInfo.find('Venue').text # work info # stopped here: need to handle the case where there are workInfo = c.find('worksInfo') for w 
in workInfo.findall('work'): composer = extract_text(w.find('composerName')) title = extract_text(w.find('workTitle')) conductor = extract_text(w.find('conductorName')) row = [programID, date, time, eventType, location, venue, composer, title] #, conductor] all_seasons.append(row) # convert to a data frame output_df = pd.DataFrame(all_seasons, columns = ['programID', 'date', 'Time', 'eventType', 'Location', 'Venue', 'composerName', 'workTitle']) output_df.to_csv('NY_Philharmonic_df.csv', sep = '|', encoding = 'utf-8')
en
0.743938
# -*- coding: utf-8 -*- Created on Tue Apr 26 21:53:24 2016 @author: jiun Data out of xml and into a Postgres db. Largely based on composer_frequency.py #import modules # from sys import argv # from collections import Counter # from sets import Set # import matplotlib.pyplot as plt #create xml collection of "docs" (i.e., programs in NYPhil's definition) #convert season listing (e.g., 1842-43) to a single leading year (1842) #get the composer's last name only from the worksComposerTitle elements #gather info from XML file # create a long table with the following columns: # programID, date,Time, eventType, Location, Venue, composerName, workTitle, conductorName # season = simplify_date(c.find('season').text) # concert info # work info # stopped here: need to handle the case where there are #, conductor] # convert to a data frame
2.697751
3
items.py
rullmann/bundlewrap-centos-vnstat
0
6629826
pkg_dnf = { 'vnstat': {}, } svc_systemd = { 'vnstat': { 'needs': ['pkg_dnf:vnstat'] }, } actions = {} files = { '/etc/sysconfig/vnstat': { 'source': 'sysconfig_vnstat', 'mode': '0644', 'content_type': 'mako', 'needs': ['pkg_dnf:vnstat'], 'triggers': ['svc_systemd:vnstat:restart'], }, '/etc/vnstat.conf': { 'source': 'vnstat.conf', 'mode': '0644', 'content_type': 'mako', 'needs': ['pkg_dnf:vnstat'], 'triggers': ['svc_systemd:vnstat:restart'], }, } directories = { '/var/lib/vnstat': { 'mode': '6755', 'owner': 'vnstat', 'needs': ['pkg_dnf:vnstat'], }, } for interface in node.metadata['interfaces']: actions['vnstat_create_database_{}'.format(interface)] = { 'command': 'vnstat -u -i {}'.format(interface), 'unless': 'test -f /var/lib/vnstat/{}'.format(interface), 'cascade_skip': False, 'needs': ['pkg_dnf:vnstat'], 'triggers': ['svc_systemd:vnstat:restart'], }
pkg_dnf = { 'vnstat': {}, } svc_systemd = { 'vnstat': { 'needs': ['pkg_dnf:vnstat'] }, } actions = {} files = { '/etc/sysconfig/vnstat': { 'source': 'sysconfig_vnstat', 'mode': '0644', 'content_type': 'mako', 'needs': ['pkg_dnf:vnstat'], 'triggers': ['svc_systemd:vnstat:restart'], }, '/etc/vnstat.conf': { 'source': 'vnstat.conf', 'mode': '0644', 'content_type': 'mako', 'needs': ['pkg_dnf:vnstat'], 'triggers': ['svc_systemd:vnstat:restart'], }, } directories = { '/var/lib/vnstat': { 'mode': '6755', 'owner': 'vnstat', 'needs': ['pkg_dnf:vnstat'], }, } for interface in node.metadata['interfaces']: actions['vnstat_create_database_{}'.format(interface)] = { 'command': 'vnstat -u -i {}'.format(interface), 'unless': 'test -f /var/lib/vnstat/{}'.format(interface), 'cascade_skip': False, 'needs': ['pkg_dnf:vnstat'], 'triggers': ['svc_systemd:vnstat:restart'], }
none
1
1.805488
2
hbase_test_log_analyse/__init__.py
byscut/azrael-py27
0
6629827
# -*- coding: utf-8 -*- # @Time : 2019/1/17 16:45 # @Author : Azrael.Bai # @File : __init__.py.py
# -*- coding: utf-8 -*- # @Time : 2019/1/17 16:45 # @Author : Azrael.Bai # @File : __init__.py.py
en
0.195301
# -*- coding: utf-8 -*- # @Time : 2019/1/17 16:45 # @Author : Azrael.Bai # @File : __init__.py.py
0.937292
1
lib/admin.py
drkitty/cyder
6
6629828
"""Originally from funfactory (funfactory/admin.py) on a380a54""" from django.contrib import admin as django_admin from django.contrib.admin.sites import AdminSite from session_csrf import anonymous_csrf class SessionCsrfAdminSite(AdminSite): """Custom admin site that handles login with session_csrf.""" def login(self, request, extra_context=None): @anonymous_csrf def call_parent_login(request, extra_context): return super(SessionCsrfAdminSite, self).login(request, extra_context) return call_parent_login(request, extra_context) # This is for sites that import this file directly. site = SessionCsrfAdminSite() def monkeypatch(): django_admin.site = site
"""Originally from funfactory (funfactory/admin.py) on a380a54""" from django.contrib import admin as django_admin from django.contrib.admin.sites import AdminSite from session_csrf import anonymous_csrf class SessionCsrfAdminSite(AdminSite): """Custom admin site that handles login with session_csrf.""" def login(self, request, extra_context=None): @anonymous_csrf def call_parent_login(request, extra_context): return super(SessionCsrfAdminSite, self).login(request, extra_context) return call_parent_login(request, extra_context) # This is for sites that import this file directly. site = SessionCsrfAdminSite() def monkeypatch(): django_admin.site = site
en
0.75193
Originally from funfactory (funfactory/admin.py) on a380a54 Custom admin site that handles login with session_csrf. # This is for sites that import this file directly.
2.075806
2
python/selenium/apihelper.py
jtraver/dev
0
6629829
#!/usr/bin/python """Cheap and simple API helper This program is part of "Dive Into Python", a free Python book for experienced programmers. Visit http://diveintopython.org/ for the latest version. """ __author__ = "<NAME> (<EMAIL>)" __version__ = "$Revision: 1.3 $" __date__ = "$Date: 2004/05/05 21:57:19 $" __copyright__ = "Copyright (c) 2001 <NAME>" __license__ = "Python" # While this is a good example script to teach about introspection, # in real life it has been superceded by PyDoc, which is part of the # standard library in Python 2.1 and later. # # Your IDE may already import the "help" function from pydoc # automatically on startup; if not, do this: # # >>> from pydoc import help # # The help function in this module takes the object itself to get # help on, but PyDoc can also take a string, like this: # # >>> help("string") # gets help on the string module # >>> help("apihelper.help") # gets help on the function below # >>> help() # enters an interactive help mode # # PyDoc can also act as an HTTP server to dynamically produce # HTML-formatted documentation of any module in your path. # That's wicked cool. Read more about PyDoc here: # http://www.onlamp.com/pub/a/python/2001/04/18/pydoc.html def info(object, spacing=10, collapse=1): """Print methods and doc strings. Takes module, class, list, dictionary, or string.""" methodList = [e for e in dir(object) if callable(getattr(object, e))] processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s) print "\n".join(["\n%s\n\t%s" % (method.ljust(spacing), processFunc(str(getattr(object, method).__doc__))) for method in methodList]) if __name__ == "__main__": print help.__doc__
#!/usr/bin/python """Cheap and simple API helper This program is part of "Dive Into Python", a free Python book for experienced programmers. Visit http://diveintopython.org/ for the latest version. """ __author__ = "<NAME> (<EMAIL>)" __version__ = "$Revision: 1.3 $" __date__ = "$Date: 2004/05/05 21:57:19 $" __copyright__ = "Copyright (c) 2001 <NAME>" __license__ = "Python" # While this is a good example script to teach about introspection, # in real life it has been superceded by PyDoc, which is part of the # standard library in Python 2.1 and later. # # Your IDE may already import the "help" function from pydoc # automatically on startup; if not, do this: # # >>> from pydoc import help # # The help function in this module takes the object itself to get # help on, but PyDoc can also take a string, like this: # # >>> help("string") # gets help on the string module # >>> help("apihelper.help") # gets help on the function below # >>> help() # enters an interactive help mode # # PyDoc can also act as an HTTP server to dynamically produce # HTML-formatted documentation of any module in your path. # That's wicked cool. Read more about PyDoc here: # http://www.onlamp.com/pub/a/python/2001/04/18/pydoc.html def info(object, spacing=10, collapse=1): """Print methods and doc strings. Takes module, class, list, dictionary, or string.""" methodList = [e for e in dir(object) if callable(getattr(object, e))] processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s) print "\n".join(["\n%s\n\t%s" % (method.ljust(spacing), processFunc(str(getattr(object, method).__doc__))) for method in methodList]) if __name__ == "__main__": print help.__doc__
en
0.765234
#!/usr/bin/python Cheap and simple API helper This program is part of "Dive Into Python", a free Python book for experienced programmers. Visit http://diveintopython.org/ for the latest version. # While this is a good example script to teach about introspection, # in real life it has been superceded by PyDoc, which is part of the # standard library in Python 2.1 and later. # # Your IDE may already import the "help" function from pydoc # automatically on startup; if not, do this: # # >>> from pydoc import help # # The help function in this module takes the object itself to get # help on, but PyDoc can also take a string, like this: # # >>> help("string") # gets help on the string module # >>> help("apihelper.help") # gets help on the function below # >>> help() # enters an interactive help mode # # PyDoc can also act as an HTTP server to dynamically produce # HTML-formatted documentation of any module in your path. # That's wicked cool. Read more about PyDoc here: # http://www.onlamp.com/pub/a/python/2001/04/18/pydoc.html Print methods and doc strings. Takes module, class, list, dictionary, or string.
3.70287
4
game/game_adapted.py
Killy85/game_ai_trainer
0
6629830
<reponame>Killy85/game_ai_trainer<gh_stars>0 # -*- coding: cp1252 -*- from random import random from pygame import * from math import cos, sin, pi import os.path import sys """ Une addaptation du jeu pour communiquer aver l'ia Description : - start () : void - action("Droite"/"gauche") : action_feedback """ # couleurs black = (0, 0, 0) red = (255, 0, 0) green = (0, 255, 0) blue = (0, 0, 255) white = (255, 255, 255) #Fonction pour charger les images def load_image(name, colorkey=None): fullname = os.path.join(os.getcwd(), 'images', name) img = image.load(fullname) img = img.convert() if colorkey is not None: if colorkey is -1: colorkey = img.get_at((0, 0)) img.set_colorkey(colorkey, RLEACCEL) return img, img.get_rect() class Brique(sprite.Sprite): """Les briques sont détruites par la balle.""" def __init__(self, screen, x, y): sprite.Sprite.__init__(self) self.screen = screen self.x, self.y = x, y self.image, self.rect = load_image('brique.GIF') self.rect.topleft = (self.x, self.y) class Balle(sprite.Sprite): """Une balle qui se déplace sur l'écran.""" def __init__(self, screen): sprite.Sprite.__init__(self) self.screen = screen self.area = self.screen.get_rect() self.image, self.rect = load_image('balle.GIF', -1) self.reinit() self.is_lauched = False self.has_bounced = False def reinit(self): self.rect.centerx = self.area.centerx self.rect.centery = 480 self.angle = -1*(random() * 2.74 + 0.2) # pi/3.3 self.flag = 1 def update(self): if self.flag == 1: dx, dy = 7*cos(self.angle), 7*sin(self.angle) self.rect = self.rect.move(dx, dy) #Collision sur les parois de l'écran if not self.area.contains(self.rect): tl = not self.area.collidepoint(self.rect.topleft) tr = not self.area.collidepoint(self.rect.topright) bl = not self.area.collidepoint(self.rect.bottomleft) br = not self.area.collidepoint(self.rect.bottomright) if tr and tl or (br and bl): self.angle = -self.angle if tl and bl: self.angle = pi - self.angle if tr and br: self.angle = pi - self.angle if bl and br: 
self.reinit() j.reinit() j.vies = j.vies - 1 #Collision avec la raquette du joueur elif self.rect.colliderect(j.rect): self.rect.bottom = j.rect.top self.angle = -self.angle if(j.rect.x > self.rect.centerx): self.angle -= 0.3 else: self.angle += 0.2 if self.angle > 2.8 : self.angle = 2.8 elif self.angle < 0.2 and self.angle >= 0.0: self.angle = 0.2 elif self.angle > -0.2: self.angle = 0.2 elif self.angle < -2.8: self.angle = -2.8 self.has_bounced = True #Collision avec une brique du groupe briquesprite collision = sprite.spritecollide(self, brs, 1) if collision: self.angle = -self.angle j.score = j.score + 5*(len(collision)) def start(self): self.flag = 1 def pause(self): if self.flag == 1: self.flag = 'p' return if self.flag == 'p': self.flag = 1 class Raquette(sprite.Sprite): """Une raquette pour empécher la balle de tomber.""" def __init__(self, screen): sprite.Sprite.__init__(self) self.image, self.rect = load_image('raquette.GIF') self.screen = screen self.area = self.screen.get_rect() self.score = 0 self.vies_max = 5 self.vies = self.vies_max self.font1 = font.Font(None, 25) self.font2 = font.Font(None, 25) self.reinit() def update(self): text1 = self.font1.render("Score : "+str(self.score), 1, (0, 0, 0)) text1pos = text1.get_rect(topleft=(5, 5)) text2 = self.font2.render("Vies : "+str(self.vies), 1, (0, 0, 0)) text2pos = text2.get_rect(topright=(515, 5)) self.screen.blit(text1, text1pos) self.screen.blit(text2, text2pos) def reinit(self): self.rect.centerx = self.area.centerx self.rect.centery = 510 self.flag = 0 def left(self): if self.flag == 0: rect = self.rect.move((-60, 0)) if rect.left < 0: rect.left = 0 self.rect = rect def right(self): if self.flag == 0: rect = self.rect.move((60, 0)) if rect.right > self.area.right: rect.right = self.area.right self.rect = rect def start(self): self.flag = 1 def pause(self): if self.flag == 1: self.flag = 'p' return if self.flag == 'p': self.flag = 1 class Game_adapted(): def __init__(self, limit_fps = True): 
self.limit_fps = limit_fps """ """ def briques(self, screen, niv): #Construction des briques selon le niveau briques = sprite.RenderPlain() if niv == 1: x, y = 7, 40 while x < 470: brique = Brique(screen, x, y) briques.add(brique) y = y + 16 if y == 200: y = 40 x = x + 1 if niv == 2: x, y = 7, 40 while x < 470: brique = Brique(screen, x, y) briques.add(brique) y = y + 16 if y == 200: y = 40 x = x + 46 return briques def initialisation(self, screen): balle = Balle(screen) joueur = Raquette(screen) ballesprite = sprite.RenderPlain(balle) joueursprite = sprite.RenderPlain(joueur) briquesprite = self.briques(screen, 2) return balle, joueur, ballesprite, joueursprite, briquesprite def start(self): self.b.start() j.start() def pause(self): self.b.pause() j.pause() def update_frame(self, movement): reward = 0 rectPos = j.rect.x startBrs = len(brs) start_vie = j.vies #flag2 = True nb_frame_to_do = 5 last_frame = '' for p in range(nb_frame_to_do): # while flag2: if self.limit_fps : self.chrono.tick(60) # Contréler la raquette if(movement == 2 and p == 1): # Deplacement de la barre vers la droite j.right() if(movement == 0 and p == 1): # Deplacement de la barre vers la gauche j.left() if(p == nb_frame_to_do -1): last_frame = surfarray.array2d(self.screen.copy()) if j.vies == 0: # Si le joueur n'a plus de vies msg5 = self.cadre.render("Vous avez perdu. Votre score:", 0, black) msg6 = self.cadre.render(str(j.score), 0, black) pos_msg5 = msg5.get_rect() pos_msg5.center = self.area.center pos_msg6 = msg6.get_rect() pos_msg6.center = self.area.center pos_msg6.centery = self.area.centery + 50 # Affichage à l'écran self.screen.fill(red) self.screen.blit(msg5, pos_msg5) self.screen.blit(msg6, pos_msg6) display.flip() j.vies = j.vies_max self.reset() if len(brs) == 0: # S'il n'y a plus de briques msg7 = self.cadre.render("Vous avez gagné. 
Votre score:", 0, black) msg8 = self.cadre.render(str(j.score), 0, black) pos_msg7 = msg7.get_rect() pos_msg7.center = self.area.center pos_msg8 = msg8.get_rect() pos_msg8.center = self.area.center pos_msg8.centery = self.area.centery + 50 # Affichage à l'écran self.screen.fill(green) self.screen.blit(msg7, pos_msg7) self.screen.blit(msg8, pos_msg8) display.flip() flag5 = True while flag5: for e in event.get(): if e.type == KEYDOWN and e.key == K_ESCAPE: flag5 = False flag2 = False if(self.b.has_bounced and self.b.is_lauched): reward += 10 self.b.has_bounced = False # Rafraichissement de l'écran pendant le jeu self.screen.fill(white) self.bs.update() self.js.update() self.bs.draw(self.screen) self.js.draw(self.screen) brs.draw(self.screen) display.flip() brick_reward = ((startBrs - len(brs)) if (startBrs - len(brs)) > 0 else 0 )* 5 life_reward = 0 #((start_vie - j.vies) * -1) self.b.is_lauched = True if(start_vie - j.vies): self.b.is_lauched = False self.b.has_bounced = False # 1.a : position du centre de la raquette # 1.b : position du centre de la balle # 2 : raward # 3 : wtf ? # return (j.rect.x, self.b.rect.x), reward+life_reward, (start_vie == 1 and j.vies == 5) # 1.a : last_frame - 6 # 1.b : last-frame - 3 # 1.c : last_frame # 2 : raward # 3 : wtf ? return last_frame, reward, (start_vie == 1 and j.vies == 5), [] def main(self): #Initialisation de l'écran init() self.screen = display.set_mode((540, 550)) display.set_caption('Casse Briques v.1.1.') icon, icon_rect = load_image('icon.GIF') display.set_icon(icon) self.area = self.screen.get_rect() self.screen.fill((0, 0, 0)) display.flip() #Accueil self.cadre = font.Font(None, 35) self.chrono = time.Clock() global j, brs self.b, j, self.bs, self.js, brs = self.initialisation(self.screen) def reset(self): self.main() return (j.rect.x, self.b.rect.x) def step(self, action): return self.update_frame(action) def render(self, limit_fps): self.limit_fps = limit_fps if __name__ == '__main__': Game_adapted().main()
# -*- coding: cp1252 -*- from random import random from pygame import * from math import cos, sin, pi import os.path import sys """ Une addaptation du jeu pour communiquer aver l'ia Description : - start () : void - action("Droite"/"gauche") : action_feedback """ # couleurs black = (0, 0, 0) red = (255, 0, 0) green = (0, 255, 0) blue = (0, 0, 255) white = (255, 255, 255) #Fonction pour charger les images def load_image(name, colorkey=None): fullname = os.path.join(os.getcwd(), 'images', name) img = image.load(fullname) img = img.convert() if colorkey is not None: if colorkey is -1: colorkey = img.get_at((0, 0)) img.set_colorkey(colorkey, RLEACCEL) return img, img.get_rect() class Brique(sprite.Sprite): """Les briques sont détruites par la balle.""" def __init__(self, screen, x, y): sprite.Sprite.__init__(self) self.screen = screen self.x, self.y = x, y self.image, self.rect = load_image('brique.GIF') self.rect.topleft = (self.x, self.y) class Balle(sprite.Sprite): """Une balle qui se déplace sur l'écran.""" def __init__(self, screen): sprite.Sprite.__init__(self) self.screen = screen self.area = self.screen.get_rect() self.image, self.rect = load_image('balle.GIF', -1) self.reinit() self.is_lauched = False self.has_bounced = False def reinit(self): self.rect.centerx = self.area.centerx self.rect.centery = 480 self.angle = -1*(random() * 2.74 + 0.2) # pi/3.3 self.flag = 1 def update(self): if self.flag == 1: dx, dy = 7*cos(self.angle), 7*sin(self.angle) self.rect = self.rect.move(dx, dy) #Collision sur les parois de l'écran if not self.area.contains(self.rect): tl = not self.area.collidepoint(self.rect.topleft) tr = not self.area.collidepoint(self.rect.topright) bl = not self.area.collidepoint(self.rect.bottomleft) br = not self.area.collidepoint(self.rect.bottomright) if tr and tl or (br and bl): self.angle = -self.angle if tl and bl: self.angle = pi - self.angle if tr and br: self.angle = pi - self.angle if bl and br: self.reinit() j.reinit() j.vies = j.vies - 1 
#Collision avec la raquette du joueur elif self.rect.colliderect(j.rect): self.rect.bottom = j.rect.top self.angle = -self.angle if(j.rect.x > self.rect.centerx): self.angle -= 0.3 else: self.angle += 0.2 if self.angle > 2.8 : self.angle = 2.8 elif self.angle < 0.2 and self.angle >= 0.0: self.angle = 0.2 elif self.angle > -0.2: self.angle = 0.2 elif self.angle < -2.8: self.angle = -2.8 self.has_bounced = True #Collision avec une brique du groupe briquesprite collision = sprite.spritecollide(self, brs, 1) if collision: self.angle = -self.angle j.score = j.score + 5*(len(collision)) def start(self): self.flag = 1 def pause(self): if self.flag == 1: self.flag = 'p' return if self.flag == 'p': self.flag = 1 class Raquette(sprite.Sprite): """Une raquette pour empécher la balle de tomber.""" def __init__(self, screen): sprite.Sprite.__init__(self) self.image, self.rect = load_image('raquette.GIF') self.screen = screen self.area = self.screen.get_rect() self.score = 0 self.vies_max = 5 self.vies = self.vies_max self.font1 = font.Font(None, 25) self.font2 = font.Font(None, 25) self.reinit() def update(self): text1 = self.font1.render("Score : "+str(self.score), 1, (0, 0, 0)) text1pos = text1.get_rect(topleft=(5, 5)) text2 = self.font2.render("Vies : "+str(self.vies), 1, (0, 0, 0)) text2pos = text2.get_rect(topright=(515, 5)) self.screen.blit(text1, text1pos) self.screen.blit(text2, text2pos) def reinit(self): self.rect.centerx = self.area.centerx self.rect.centery = 510 self.flag = 0 def left(self): if self.flag == 0: rect = self.rect.move((-60, 0)) if rect.left < 0: rect.left = 0 self.rect = rect def right(self): if self.flag == 0: rect = self.rect.move((60, 0)) if rect.right > self.area.right: rect.right = self.area.right self.rect = rect def start(self): self.flag = 1 def pause(self): if self.flag == 1: self.flag = 'p' return if self.flag == 'p': self.flag = 1 class Game_adapted(): def __init__(self, limit_fps = True): self.limit_fps = limit_fps """ """ def 
briques(self, screen, niv): #Construction des briques selon le niveau briques = sprite.RenderPlain() if niv == 1: x, y = 7, 40 while x < 470: brique = Brique(screen, x, y) briques.add(brique) y = y + 16 if y == 200: y = 40 x = x + 1 if niv == 2: x, y = 7, 40 while x < 470: brique = Brique(screen, x, y) briques.add(brique) y = y + 16 if y == 200: y = 40 x = x + 46 return briques def initialisation(self, screen): balle = Balle(screen) joueur = Raquette(screen) ballesprite = sprite.RenderPlain(balle) joueursprite = sprite.RenderPlain(joueur) briquesprite = self.briques(screen, 2) return balle, joueur, ballesprite, joueursprite, briquesprite def start(self): self.b.start() j.start() def pause(self): self.b.pause() j.pause() def update_frame(self, movement): reward = 0 rectPos = j.rect.x startBrs = len(brs) start_vie = j.vies #flag2 = True nb_frame_to_do = 5 last_frame = '' for p in range(nb_frame_to_do): # while flag2: if self.limit_fps : self.chrono.tick(60) # Contréler la raquette if(movement == 2 and p == 1): # Deplacement de la barre vers la droite j.right() if(movement == 0 and p == 1): # Deplacement de la barre vers la gauche j.left() if(p == nb_frame_to_do -1): last_frame = surfarray.array2d(self.screen.copy()) if j.vies == 0: # Si le joueur n'a plus de vies msg5 = self.cadre.render("Vous avez perdu. Votre score:", 0, black) msg6 = self.cadre.render(str(j.score), 0, black) pos_msg5 = msg5.get_rect() pos_msg5.center = self.area.center pos_msg6 = msg6.get_rect() pos_msg6.center = self.area.center pos_msg6.centery = self.area.centery + 50 # Affichage à l'écran self.screen.fill(red) self.screen.blit(msg5, pos_msg5) self.screen.blit(msg6, pos_msg6) display.flip() j.vies = j.vies_max self.reset() if len(brs) == 0: # S'il n'y a plus de briques msg7 = self.cadre.render("Vous avez gagné. 
Votre score:", 0, black) msg8 = self.cadre.render(str(j.score), 0, black) pos_msg7 = msg7.get_rect() pos_msg7.center = self.area.center pos_msg8 = msg8.get_rect() pos_msg8.center = self.area.center pos_msg8.centery = self.area.centery + 50 # Affichage à l'écran self.screen.fill(green) self.screen.blit(msg7, pos_msg7) self.screen.blit(msg8, pos_msg8) display.flip() flag5 = True while flag5: for e in event.get(): if e.type == KEYDOWN and e.key == K_ESCAPE: flag5 = False flag2 = False if(self.b.has_bounced and self.b.is_lauched): reward += 10 self.b.has_bounced = False # Rafraichissement de l'écran pendant le jeu self.screen.fill(white) self.bs.update() self.js.update() self.bs.draw(self.screen) self.js.draw(self.screen) brs.draw(self.screen) display.flip() brick_reward = ((startBrs - len(brs)) if (startBrs - len(brs)) > 0 else 0 )* 5 life_reward = 0 #((start_vie - j.vies) * -1) self.b.is_lauched = True if(start_vie - j.vies): self.b.is_lauched = False self.b.has_bounced = False # 1.a : position du centre de la raquette # 1.b : position du centre de la balle # 2 : raward # 3 : wtf ? # return (j.rect.x, self.b.rect.x), reward+life_reward, (start_vie == 1 and j.vies == 5) # 1.a : last_frame - 6 # 1.b : last-frame - 3 # 1.c : last_frame # 2 : raward # 3 : wtf ? return last_frame, reward, (start_vie == 1 and j.vies == 5), [] def main(self): #Initialisation de l'écran init() self.screen = display.set_mode((540, 550)) display.set_caption('Casse Briques v.1.1.') icon, icon_rect = load_image('icon.GIF') display.set_icon(icon) self.area = self.screen.get_rect() self.screen.fill((0, 0, 0)) display.flip() #Accueil self.cadre = font.Font(None, 35) self.chrono = time.Clock() global j, brs self.b, j, self.bs, self.js, brs = self.initialisation(self.screen) def reset(self): self.main() return (j.rect.x, self.b.rect.x) def step(self, action): return self.update_frame(action) def render(self, limit_fps): self.limit_fps = limit_fps if __name__ == '__main__': Game_adapted().main()
fr
0.961405
# -*- coding: cp1252 -*- Une addaptation du jeu pour communiquer aver l'ia Description : - start () : void - action("Droite"/"gauche") : action_feedback # couleurs #Fonction pour charger les images Les briques sont détruites par la balle. Une balle qui se déplace sur l'écran. # pi/3.3 #Collision sur les parois de l'écran #Collision avec la raquette du joueur #Collision avec une brique du groupe briquesprite Une raquette pour empécher la balle de tomber. #Construction des briques selon le niveau #flag2 = True # while flag2: # Contréler la raquette # Deplacement de la barre vers la droite # Deplacement de la barre vers la gauche # Si le joueur n'a plus de vies # Affichage à l'écran # S'il n'y a plus de briques # Affichage à l'écran # Rafraichissement de l'écran pendant le jeu #((start_vie - j.vies) * -1) # 1.a : position du centre de la raquette # 1.b : position du centre de la balle # 2 : raward # 3 : wtf ? # return (j.rect.x, self.b.rect.x), reward+life_reward, (start_vie == 1 and j.vies == 5) # 1.a : last_frame - 6 # 1.b : last-frame - 3 # 1.c : last_frame # 2 : raward # 3 : wtf ? #Initialisation de l'écran #Accueil
3.307967
3
meta_reward_learning/semantic_parsing/nsm/word_embeddings.py
kiss2u/google-research
7
6629831
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import numpy as np # import gensim from tensorflow.compat.v1 import gfile class EmbeddingModel(object): def __init__(self, vocab_file, embedding_file, normalize_embeddings=True): with gfile.Open(embedding_file, 'rb') as f: self.embedding_mat = np.load(f) if normalize_embeddings: self.embedding_mat = self.embedding_mat / np.linalg.norm( self.embedding_mat, axis=1, keepdims=True) with gfile.Open(vocab_file, 'r') as f: tks = json.load(f) self.vocab = dict(zip(tks, range(len(tks)))) def __contains__(self, word): return word in self.vocab def __getitem__(self, word): if word in self.vocab: index = self.vocab[word] return self.embedding_mat[index] else: raise KeyError
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import numpy as np # import gensim from tensorflow.compat.v1 import gfile class EmbeddingModel(object): def __init__(self, vocab_file, embedding_file, normalize_embeddings=True): with gfile.Open(embedding_file, 'rb') as f: self.embedding_mat = np.load(f) if normalize_embeddings: self.embedding_mat = self.embedding_mat / np.linalg.norm( self.embedding_mat, axis=1, keepdims=True) with gfile.Open(vocab_file, 'r') as f: tks = json.load(f) self.vocab = dict(zip(tks, range(len(tks)))) def __contains__(self, word): return word in self.vocab def __getitem__(self, word): if word in self.vocab: index = self.vocab[word] return self.embedding_mat[index] else: raise KeyError
en
0.836306
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import gensim
2.588117
3
src/ncclient/get.py
fredhsu/openmgmt
30
6629832
<filename>src/ncclient/get.py from ncclient import manager eos=manager.connect(host="10.83.28.203", port="830", timeout=30, username="arista", password="<PASSWORD>", hostkey_verify=False) # Get interface Ethernet 3 operational status int_eth3_op_status = ''' <interfaces> <interface> <name> Ethernet3 </name> <state> <oper-status> </oper-status> </state> </interface> </interfaces> ''' get_int_eth3_op_status = eos.get(filter=("subtree", int_eth3_op_status)) print (get_int_eth3_op_status) eos.close_session()
<filename>src/ncclient/get.py from ncclient import manager eos=manager.connect(host="10.83.28.203", port="830", timeout=30, username="arista", password="<PASSWORD>", hostkey_verify=False) # Get interface Ethernet 3 operational status int_eth3_op_status = ''' <interfaces> <interface> <name> Ethernet3 </name> <state> <oper-status> </oper-status> </state> </interface> </interfaces> ''' get_int_eth3_op_status = eos.get(filter=("subtree", int_eth3_op_status)) print (get_int_eth3_op_status) eos.close_session()
en
0.215162
# Get interface Ethernet 3 operational status <interfaces> <interface> <name> Ethernet3 </name> <state> <oper-status> </oper-status> </state> </interface> </interfaces>
2.054289
2
src/api/bkuser_core/tests/apis/v2/profiles/test_login.py
Canway-shiisa/bk-user
0
6629833
# -*- coding: utf-8 -*- """ TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import datetime import time import pytest from django.contrib.auth.hashers import make_password from django.utils.timezone import now from bkuser_core.categories.constants import CategoryStatus from bkuser_core.profiles.constants import ProfileStatus, RoleCodeEnum from bkuser_core.profiles.v2.views import ProfileLoginViewSet from bkuser_core.tests.apis.utils import get_api_factory from bkuser_core.tests.utils import make_simple_category, make_simple_profile from bkuser_core.user_settings.models import Setting pytestmark = pytest.mark.django_db class TestListCreateApis: @pytest.fixture(scope="class") def factory(self): return get_api_factory({"HTTP_RAW_USERNAME": False}) @pytest.fixture(scope="class") def check_view(self): return ProfileLoginViewSet.as_view({"post": "login"}) @pytest.fixture(scope="class") def query_view(self): return ProfileLoginViewSet.as_view({"post": "batch_query"}) @pytest.fixture(scope="class") def upsert_view(self): return ProfileLoginViewSet.as_view({"post": "upsert"}) @property def required_return_key(self): return [ "username", "email", "telephone", "wx_userid", "domain", "status", "staff_status", ] def _assert_required_keys_exist(self, response_data: dict): for i in self.required_return_key: assert i in response_data def test_check(self, 
factory, check_view): """测试登录校验""" make_simple_profile( username="logintest", force_create_params={"password": make_password("<PASSWORD>"), "password_update_time": now()}, ) request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "default.local"}, ) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) def test_other_field_check(self, factory, check_view): """测试使用其他字段登录""" make_simple_profile( username="logintest", force_create_params={ "password": make_password("<PASSWORD>"), "email": "haha@haha", "telephone": "12345", "password_update_time": now(), }, ) request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) request = factory.post("/api/v1/login/check/", data={"username": "12345", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) request = factory.post("/api/v1/login/check/", data={"username": "haha@haha", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) def test_other_field_duplicate(self, factory, check_view): """测试使用其他字段登录重复问题""" make_simple_profile( username="logintest", force_create_params={ "password": make_password("<PASSWORD>"), "email": "haha@haha", "telephone": "12345", 
"password_update_time": now(), }, ) make_simple_profile( username="logintest1", force_create_params={ "password": make_password("<PASSWORD>"), "email": "haha@haha", "telephone": "12345", "password_update_time": now(), }, ) # 实际上是这些字段重复了,但是会模糊错误返回 request = factory.post("/api/v1/login/check/", data={"username": "12345", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" request = factory.post("/api/v1/login/check/", data={"username": "haha@haha", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" def test_multiple_domain_check(self, factory, check_view): """测试多域登录检验""" ca = make_simple_category(domain="testdomain", display_name="测试") ca.make_default_settings() make_simple_profile( username="logintest", force_create_params={ "password": <PASSWORD>_password("<PASSWORD>"), "domain": "testdomain", "category_id": ca.id, "password_update_time": now(), }, ) request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 多域正常 request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["username"] == "logintest@testdomain" self._assert_required_keys_exist(response.data) def test_check_error(self, factory, check_view): """测试多域登录失败检验""" ca = make_simple_category(domain="testdomain", display_name="测试") ca.make_default_settings() p = make_simple_profile( username="logintest", force_create_params={ "password": make_password("<PASSWORD>"), "domain": "testdomain", "category_id": ca.id, "password_update_time": now(), }, ) # 未知登录域 request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "xxxx"}, ) response = check_view(request=request) assert 
response.data["code"] == "DOMAIN_UNKNOWN" # 已禁用登录域 ca.enabled = False ca.status = CategoryStatus.INACTIVE.value ca.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "CATEGORY_NOT_ENABLED" ca.enabled = True ca.status = CategoryStatus.NORMAL.value ca.save() # 普通密码错误 request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 用户被锁 p.status = ProfileStatus.LOCKED.value p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 用户被禁用 p.status = ProfileStatus.DISABLED.value p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 超级用户不判断用户状态 p.role = RoleCodeEnum.SUPERUSER.value p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data self._assert_required_keys_exist(response.data) p.role = RoleCodeEnum.STAFF.value p.save() # 用户密码过期 p.password_update_time = now() - datetime.timedelta(days=3 * 365) p.password_valid_days = 1 p.status = ProfileStatus.NORMAL.value p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": 
"testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "PASSWORD_EXPIRED" # 初始化密码需要修改 p.password_update_time = None p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "SHOULD_CHANGE_INITIAL_PASSWORD" def test_check_auto_lock(self, factory, check_view): """测试多次错误自动锁定""" make_simple_profile( username="logintest", force_create_params={"password": make_password("<PASSWORD>"), "password_update_time": now()}, ) auto_unlock_seconds = Setting.objects.get(category__id=1, meta__key="auto_unlock_seconds") auto_unlock_seconds.value = 2 auto_unlock_seconds.save() max_trail_times = Setting.objects.get(category__id=1, meta__key="max_trail_times") max_trail_times.value = 1 max_trail_times.save() request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 确保解锁了 time.sleep(2) request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) self._assert_required_keys_exist(response.data) def test_batch_query(self): """测试批量查询""" # request = self.factory.post('/api/v2/profiles/') def test_upsert(self, factory, upsert_view): """测试更新插入""" body = {"username": "xxx", "domain": "default.local"} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["username"] == "xxx" assert resp.data["domain"] == "default.local" assert resp.data["category_id"] == 1 def test_upsert_wrong_username(self, factory, upsert_view): """测试更新插入""" body = {"username": 
"xxx@xxx", "domain": "default.local"} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["code"] == "VALIDATION_ERROR" def test_upsert_default_category(self, factory, upsert_view): body = {"username": "xxx"} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["username"] == "xxx" assert resp.data["domain"] == "default.local" assert resp.data["category_id"] == 1 @pytest.mark.parametrize( "username, domain, expected", [ ("xxx", "abcd", "DOMAIN_UNKNOWN"), ("xxx", "abcd__dd", "VALIDATION_ERROR"), ("xxx", "ab@__dd", "VALIDATION_ERROR"), ], ) def test_upsert_with_domain(self, factory, upsert_view, username, domain, expected): body = {"username": username, "domain": domain} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["code"] == expected def test_upsert_username_contain_domain(self, factory, upsert_view): body = {"username": "<EMAIL>"} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["username"] == "xxx" assert resp.data["domain"] == "default.local"
# -*- coding: utf-8 -*- """ TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import datetime import time import pytest from django.contrib.auth.hashers import make_password from django.utils.timezone import now from bkuser_core.categories.constants import CategoryStatus from bkuser_core.profiles.constants import ProfileStatus, RoleCodeEnum from bkuser_core.profiles.v2.views import ProfileLoginViewSet from bkuser_core.tests.apis.utils import get_api_factory from bkuser_core.tests.utils import make_simple_category, make_simple_profile from bkuser_core.user_settings.models import Setting pytestmark = pytest.mark.django_db class TestListCreateApis: @pytest.fixture(scope="class") def factory(self): return get_api_factory({"HTTP_RAW_USERNAME": False}) @pytest.fixture(scope="class") def check_view(self): return ProfileLoginViewSet.as_view({"post": "login"}) @pytest.fixture(scope="class") def query_view(self): return ProfileLoginViewSet.as_view({"post": "batch_query"}) @pytest.fixture(scope="class") def upsert_view(self): return ProfileLoginViewSet.as_view({"post": "upsert"}) @property def required_return_key(self): return [ "username", "email", "telephone", "wx_userid", "domain", "status", "staff_status", ] def _assert_required_keys_exist(self, response_data: dict): for i in self.required_return_key: assert i in response_data def test_check(self, 
factory, check_view): """测试登录校验""" make_simple_profile( username="logintest", force_create_params={"password": make_password("<PASSWORD>"), "password_update_time": now()}, ) request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "default.local"}, ) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) def test_other_field_check(self, factory, check_view): """测试使用其他字段登录""" make_simple_profile( username="logintest", force_create_params={ "password": make_password("<PASSWORD>"), "email": "haha@haha", "telephone": "12345", "password_update_time": now(), }, ) request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) request = factory.post("/api/v1/login/check/", data={"username": "12345", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) request = factory.post("/api/v1/login/check/", data={"username": "haha@haha", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data assert response.data["username"] == "logintest" self._assert_required_keys_exist(response.data) def test_other_field_duplicate(self, factory, check_view): """测试使用其他字段登录重复问题""" make_simple_profile( username="logintest", force_create_params={ "password": make_password("<PASSWORD>"), "email": "haha@haha", "telephone": "12345", 
"password_update_time": now(), }, ) make_simple_profile( username="logintest1", force_create_params={ "password": make_password("<PASSWORD>"), "email": "haha@haha", "telephone": "12345", "password_update_time": now(), }, ) # 实际上是这些字段重复了,但是会模糊错误返回 request = factory.post("/api/v1/login/check/", data={"username": "12345", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" request = factory.post("/api/v1/login/check/", data={"username": "haha@haha", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" def test_multiple_domain_check(self, factory, check_view): """测试多域登录检验""" ca = make_simple_category(domain="testdomain", display_name="测试") ca.make_default_settings() make_simple_profile( username="logintest", force_create_params={ "password": <PASSWORD>_password("<PASSWORD>"), "domain": "testdomain", "category_id": ca.id, "password_update_time": now(), }, ) request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 多域正常 request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["username"] == "logintest@testdomain" self._assert_required_keys_exist(response.data) def test_check_error(self, factory, check_view): """测试多域登录失败检验""" ca = make_simple_category(domain="testdomain", display_name="测试") ca.make_default_settings() p = make_simple_profile( username="logintest", force_create_params={ "password": make_password("<PASSWORD>"), "domain": "testdomain", "category_id": ca.id, "password_update_time": now(), }, ) # 未知登录域 request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "xxxx"}, ) response = check_view(request=request) assert 
response.data["code"] == "DOMAIN_UNKNOWN" # 已禁用登录域 ca.enabled = False ca.status = CategoryStatus.INACTIVE.value ca.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "CATEGORY_NOT_ENABLED" ca.enabled = True ca.status = CategoryStatus.NORMAL.value ca.save() # 普通密码错误 request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 用户被锁 p.status = ProfileStatus.LOCKED.value p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 用户被禁用 p.status = ProfileStatus.DISABLED.value p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 超级用户不判断用户状态 p.role = RoleCodeEnum.SUPERUSER.value p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data self._assert_required_keys_exist(response.data) p.role = RoleCodeEnum.STAFF.value p.save() # 用户密码过期 p.password_update_time = now() - datetime.timedelta(days=3 * 365) p.password_valid_days = 1 p.status = ProfileStatus.NORMAL.value p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": 
"testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "PASSWORD_EXPIRED" # 初始化密码需要修改 p.password_update_time = None p.save() request = factory.post( "/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>", "domain": "testdomain"}, ) response = check_view(request=request) assert response.data["code"] == "SHOULD_CHANGE_INITIAL_PASSWORD" def test_check_auto_lock(self, factory, check_view): """测试多次错误自动锁定""" make_simple_profile( username="logintest", force_create_params={"password": make_password("<PASSWORD>"), "password_update_time": now()}, ) auto_unlock_seconds = Setting.objects.get(category__id=1, meta__key="auto_unlock_seconds") auto_unlock_seconds.value = 2 auto_unlock_seconds.save() max_trail_times = Setting.objects.get(category__id=1, meta__key="max_trail_times") max_trail_times.value = 1 max_trail_times.save() request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) assert response.data["code"] == "PASSWORD_ERROR" # 确保解锁了 time.sleep(2) request = factory.post("/api/v1/login/check/", data={"username": "logintest", "password": "<PASSWORD>"}) response = check_view(request=request) self._assert_required_keys_exist(response.data) def test_batch_query(self): """测试批量查询""" # request = self.factory.post('/api/v2/profiles/') def test_upsert(self, factory, upsert_view): """测试更新插入""" body = {"username": "xxx", "domain": "default.local"} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["username"] == "xxx" assert resp.data["domain"] == "default.local" assert resp.data["category_id"] == 1 def test_upsert_wrong_username(self, factory, upsert_view): """测试更新插入""" body = {"username": 
"xxx@xxx", "domain": "default.local"} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["code"] == "VALIDATION_ERROR" def test_upsert_default_category(self, factory, upsert_view): body = {"username": "xxx"} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["username"] == "xxx" assert resp.data["domain"] == "default.local" assert resp.data["category_id"] == 1 @pytest.mark.parametrize( "username, domain, expected", [ ("xxx", "abcd", "DOMAIN_UNKNOWN"), ("xxx", "abcd__dd", "VALIDATION_ERROR"), ("xxx", "ab@__dd", "VALIDATION_ERROR"), ], ) def test_upsert_with_domain(self, factory, upsert_view, username, domain, expected): body = {"username": username, "domain": domain} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["code"] == expected def test_upsert_username_contain_domain(self, factory, upsert_view): body = {"username": "<EMAIL>"} req = factory.post("/api/v1/login/profile/query/", body) resp = upsert_view(request=req) assert resp.data["username"] == "xxx" assert resp.data["domain"] == "default.local"
en
0.546383
# -*- coding: utf-8 -*- TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 测试登录校验 测试使用其他字段登录 测试使用其他字段登录重复问题 # 实际上是这些字段重复了,但是会模糊错误返回 测试多域登录检验 # 多域正常 测试多域登录失败检验 # 未知登录域 # 已禁用登录域 # 普通密码错误 # 用户被锁 # 用户被禁用 # 超级用户不判断用户状态 # 用户密码过期 # 初始化密码需要修改 测试多次错误自动锁定 # 确保解锁了 测试批量查询 # request = self.factory.post('/api/v2/profiles/') 测试更新插入 测试更新插入
1.797201
2
frappe/query_builder/__init__.py
techo0001/Frappe-v13.19
0
6629834
<filename>frappe/query_builder/__init__.py from frappe.query_builder.terms import ParameterizedValueWrapper, ParameterizedFunction import pypika pypika.terms.ValueWrapper = ParameterizedValueWrapper pypika.terms.Function = ParameterizedFunction from pypika import * from frappe.query_builder.utils import DocType, get_query_builder, patch_query_execute
<filename>frappe/query_builder/__init__.py from frappe.query_builder.terms import ParameterizedValueWrapper, ParameterizedFunction import pypika pypika.terms.ValueWrapper = ParameterizedValueWrapper pypika.terms.Function = ParameterizedFunction from pypika import * from frappe.query_builder.utils import DocType, get_query_builder, patch_query_execute
none
1
1.467013
1
bin/_run.py
ssebs/ssebsms
2
6629835
<reponame>ssebs/ssebsms<filename>bin/_run.py ### # ssebsMS.py - ssebsMS cli utility # (c) 2018 - <NAME> - FOSS MIT License ### ## # This file should be ran by the ssebsMS.py file ## import http.server import socketserver import os def srv(site_name,port): os.chdir("./" + site_name + "/public/") handler = http.server.SimpleHTTPRequestHandler httpd = socketserver.TCPServer(("", port), handler) print("Serving at http://localhost:{}".format(port)) httpd.serve_forever() # end srv() def run_entry(site_name="test",port=8008): srv(site_name,port) # end run_entry()
### # ssebsMS.py - ssebsMS cli utility # (c) 2018 - <NAME> - FOSS MIT License ### ## # This file should be ran by the ssebsMS.py file ## import http.server import socketserver import os def srv(site_name,port): os.chdir("./" + site_name + "/public/") handler = http.server.SimpleHTTPRequestHandler httpd = socketserver.TCPServer(("", port), handler) print("Serving at http://localhost:{}".format(port)) httpd.serve_forever() # end srv() def run_entry(site_name="test",port=8008): srv(site_name,port) # end run_entry()
en
0.535338
### # ssebsMS.py - ssebsMS cli utility # (c) 2018 - <NAME> - FOSS MIT License ### ## # This file should be ran by the ssebsMS.py file ## # end srv() # end run_entry()
2.17025
2
tehbot/plugins/botstats.py
tehron/tehbot
6
6629836
<filename>tehbot/plugins/botstats.py from tehbot.plugins import * import psutil import os import time import platform class BotStatsPlugin(StandardCommand): """Shows various information about tehbot""" def commands(self): return "botstats" @staticmethod def format_time(ts): years = int(ts // 31536000) ts -= 31536000 * years days = int(ts // 86400) ts -= 86400 * days hours = int(ts // 3600) ts -= 3600 * hours mins = int(ts // 60) ts -= 60 * mins secs = int(ts) if years: out = "%dy %dd %02d:%02d" % (years, days, hours, mins) elif days: out = "%dd %02d:%02d" % (days, hours, mins) elif mins >= 15: out = "%02d:%02d" % (hours, mins) else: out = "00:%02d:%02d" % (mins, secs) return out @staticmethod def get_git_version(): from subprocess import Popen, PIPE import re out, err = Popen(["git", "log", "-n", "1"], stdout=PIPE, text=True).communicate() return re.search(r'([0-9A-Fa-f]{40})', out).group(0) def execute_parsed(self, connection, event, extra): txt = "\x0303[tehbot]\x03 " stats = [] stats.append("Version: git %s" % BotStatsPlugin.get_git_version()[:10]) #stats.append("Version: 0.2.1") proc = psutil.Process(os.getpid()) stats.append("Running Time: %s" % BotStatsPlugin.format_time(time.time() - proc.create_time())) stats.append("Memory Usage: %d kB" % (proc.memory_info().rss // 1024)) stats.append("Nr. of Threads: %d" % (proc.num_threads())) stats.append("Python %s" % platform.python_version()) return txt + ", ".join(stats)
<filename>tehbot/plugins/botstats.py from tehbot.plugins import * import psutil import os import time import platform class BotStatsPlugin(StandardCommand): """Shows various information about tehbot""" def commands(self): return "botstats" @staticmethod def format_time(ts): years = int(ts // 31536000) ts -= 31536000 * years days = int(ts // 86400) ts -= 86400 * days hours = int(ts // 3600) ts -= 3600 * hours mins = int(ts // 60) ts -= 60 * mins secs = int(ts) if years: out = "%dy %dd %02d:%02d" % (years, days, hours, mins) elif days: out = "%dd %02d:%02d" % (days, hours, mins) elif mins >= 15: out = "%02d:%02d" % (hours, mins) else: out = "00:%02d:%02d" % (mins, secs) return out @staticmethod def get_git_version(): from subprocess import Popen, PIPE import re out, err = Popen(["git", "log", "-n", "1"], stdout=PIPE, text=True).communicate() return re.search(r'([0-9A-Fa-f]{40})', out).group(0) def execute_parsed(self, connection, event, extra): txt = "\x0303[tehbot]\x03 " stats = [] stats.append("Version: git %s" % BotStatsPlugin.get_git_version()[:10]) #stats.append("Version: 0.2.1") proc = psutil.Process(os.getpid()) stats.append("Running Time: %s" % BotStatsPlugin.format_time(time.time() - proc.create_time())) stats.append("Memory Usage: %d kB" % (proc.memory_info().rss // 1024)) stats.append("Nr. of Threads: %d" % (proc.num_threads())) stats.append("Python %s" % platform.python_version()) return txt + ", ".join(stats)
en
0.484224
Shows various information about tehbot #stats.append("Version: 0.2.1")
2.533336
3
test/functional/tool_signet_miner.py
rag-hav/bitcoin
6
6629837
#!/usr/bin/env python3 # Copyright (c) 2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test signet miner tool""" import os.path import subprocess import sys import time from test_framework.key import ECKey from test_framework.script_util import key_to_p2wpkh_script from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal from test_framework.wallet_util import bytes_to_wif CHALLENGE_PRIVATE_KEY = (42).to_bytes(32, 'big') class SignetMinerTest(BitcoinTestFramework): def set_test_params(self): self.chain = "signet" self.setup_clean_chain = True self.num_nodes = 1 # generate and specify signet challenge (simple p2wpkh script) privkey = ECKey() privkey.set(CHALLENGE_PRIVATE_KEY, True) pubkey = privkey.get_pubkey().get_bytes() challenge = key_to_p2wpkh_script(pubkey) self.extra_args = [[f'-signetchallenge={challenge.hex()}']] def skip_test_if_missing_module(self): self.skip_if_no_cli() self.skip_if_no_wallet() self.skip_if_no_bitcoin_util() def run_test(self): node = self.nodes[0] # import private key needed for signing block node.importprivkey(bytes_to_wif(CHALLENGE_PRIVATE_KEY)) # generate block with signet miner tool base_dir = self.config["environment"]["SRCDIR"] signet_miner_path = os.path.join(base_dir, "contrib", "signet", "miner") subprocess.run([ sys.executable, signet_miner_path, f'--cli={node.cli.binary} -datadir={node.cli.datadir}', 'generate', f'--address={node.getnewaddress()}', f'--grind-cmd={self.options.bitcoinutil} grind', '--nbits=1d00ffff', f'--set-block-time={int(time.time())}', ], check=True, stderr=subprocess.STDOUT) assert_equal(node.getblockcount(), 1) if __name__ == "__main__": SignetMinerTest().main()
#!/usr/bin/env python3 # Copyright (c) 2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test signet miner tool""" import os.path import subprocess import sys import time from test_framework.key import ECKey from test_framework.script_util import key_to_p2wpkh_script from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal from test_framework.wallet_util import bytes_to_wif CHALLENGE_PRIVATE_KEY = (42).to_bytes(32, 'big') class SignetMinerTest(BitcoinTestFramework): def set_test_params(self): self.chain = "signet" self.setup_clean_chain = True self.num_nodes = 1 # generate and specify signet challenge (simple p2wpkh script) privkey = ECKey() privkey.set(CHALLENGE_PRIVATE_KEY, True) pubkey = privkey.get_pubkey().get_bytes() challenge = key_to_p2wpkh_script(pubkey) self.extra_args = [[f'-signetchallenge={challenge.hex()}']] def skip_test_if_missing_module(self): self.skip_if_no_cli() self.skip_if_no_wallet() self.skip_if_no_bitcoin_util() def run_test(self): node = self.nodes[0] # import private key needed for signing block node.importprivkey(bytes_to_wif(CHALLENGE_PRIVATE_KEY)) # generate block with signet miner tool base_dir = self.config["environment"]["SRCDIR"] signet_miner_path = os.path.join(base_dir, "contrib", "signet", "miner") subprocess.run([ sys.executable, signet_miner_path, f'--cli={node.cli.binary} -datadir={node.cli.datadir}', 'generate', f'--address={node.getnewaddress()}', f'--grind-cmd={self.options.bitcoinutil} grind', '--nbits=1d00ffff', f'--set-block-time={int(time.time())}', ], check=True, stderr=subprocess.STDOUT) assert_equal(node.getblockcount(), 1) if __name__ == "__main__": SignetMinerTest().main()
en
0.570043
#!/usr/bin/env python3 # Copyright (c) 2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. Test signet miner tool # generate and specify signet challenge (simple p2wpkh script) # import private key needed for signing block # generate block with signet miner tool
2.051924
2
dist/weewx-4.3.0b3/bin/weewx/xtypes.py
v0rts/docker-weewx
10
6629838
# # Copyright (c) 2019-2020 <NAME> <<EMAIL>> # # See the file LICENSE.txt for your full rights. # """User-defined extensions to the WeeWX type system""" import math import weedb import weeutil.weeutil import weewx import weewx.units import weewx.wxformulas from weeutil.weeutil import isStartOfDay from weewx.units import ValueTuple # A list holding the type extensions. Each entry should be a subclass of XType, defined below. xtypes = [] class XType(object): """Base class for extensions to the WeeWX type system.""" def get_scalar(self, obs_type, record, db_manager=None): """Calculate a scalar. Specializing versions should raise... - an exception of type `weewx.UnknownType`, if the type `obs_type` is unknown to the function. - an exception of type `weewx.CannotCalculate` if the type is known to the function, but all the information necessary to calculate the type is not there. """ raise weewx.UnknownType def get_series(self, obs_type, timespan, db_manager, aggregate_type=None, aggregate_interval=None): """Calculate a series, possibly with aggregation. Specializing versions should raise... - an exception of type `weewx.UnknownType`, if the type `obs_type` is unknown to the function. - an exception of type `weewx.CannotCalculate` if the type is known to the function, but all the information necessary to calculate the series is not there. """ raise weewx.UnknownType def get_aggregate(self, obs_type, timespan, aggregate_type, db_manager, **option_dict): """Calculate an aggregation. Specializing versions should raise... - an exception of type `weewx.UnknownType`, if the type `obs_type` is unknown to the function. - an exception of type `weewx.UnknownAggregation` if the aggregation type `aggregate_type` is unknown to the function. - an exception of type `weewx.CannotCalculate` if the type is known to the function, but all the information necessary to calculate the type is not there. 
""" raise weewx.UnknownAggregation def shut_down(self): """Opportunity to do any clean up.""" pass # ##################### Retrieval functions ########################### def get_scalar(obs_type, record, db_manager=None): """Return a scalar value""" # Search the list, looking for a get_scalar() method that does not raise an exception for xtype in xtypes: try: # Try this function. It will raise an exception if it does not know about the type. return xtype.get_scalar(obs_type, record, db_manager) except weewx.UnknownType: # This function does not know about the type. Move on to the next one. pass # None of the functions worked. raise weewx.UnknownType(obs_type) def get_series(obs_type, timespan, db_manager, aggregate_type=None, aggregate_interval=None): """Return a series (aka vector) of, possibly aggregated, values.""" # Search the list, looking for a get_series() method that does not raise an exception for xtype in xtypes: try: # Try this function. It will raise an exception if it does not know about the type. return xtype.get_series(obs_type, timespan, db_manager, aggregate_type, aggregate_interval) except weewx.UnknownType: # This function does not know about the type. Move on to the next one. pass # None of the functions worked. raise weewx.UnknownType(obs_type) def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict): """Calculate an aggregation over a timespan""" # Search the list, looking for a get_aggregate() method that does not raise an exception for xtype in xtypes: try: # Try this function. It will raise an exception if it doesn't know about the type of # aggregation. 
return xtype.get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict) except (weewx.UnknownAggregation, weewx.UnknownType): pass raise weewx.UnknownAggregation("%s('%s')" % (aggregate_type, obs_type)) # # ######################## Class ArchiveTable ############################## # class ArchiveTable(XType): """Calculate types and aggregates directly from the archive table""" @staticmethod def get_series(obs_type, timespan, db_manager, aggregate_type=None, aggregate_interval=None): """Get a series, possibly with aggregation, from the main archive database. The general strategy is that if aggregation is asked for, chop the series up into separate chunks, calculating the aggregate for each chunk. Then assemble the results. If no aggregation is called for, just return the data directly out of the database. """ startstamp, stopstamp = timespan start_vec = list() stop_vec = list() data_vec = list() if aggregate_type: # With aggregation unit, unit_group = None, None if aggregate_type == 'cumulative': do_aggregate = 'sum' total = 0 else: do_aggregate = aggregate_type for stamp in weeutil.weeutil.intervalgen(startstamp, stopstamp, aggregate_interval): # Get the aggregate as a ValueTuple agg_vt = get_aggregate(obs_type, stamp, do_aggregate, db_manager) if unit: # It's OK if the unit is unknown (=None). if agg_vt[1] is not None and (unit != agg_vt[1] or unit_group != agg_vt[2]): raise weewx.UnsupportedFeature("Cannot change unit groups " "within an aggregation.") else: unit, unit_group = agg_vt[1], agg_vt[2] start_vec.append(stamp.start) stop_vec.append(stamp.stop) if aggregate_type == 'cumulative': if agg_vt[0] is not None: total += agg_vt[0] data_vec.append(total) else: data_vec.append(agg_vt[0]) else: # No aggregation sql_str = "SELECT dateTime, %s, usUnits, `interval` FROM %s " \ "WHERE dateTime >= ? AND dateTime <= ?" % (obs_type, db_manager.table_name) std_unit_system = None # Hit the database. 
It's possible the type is not in the database, so be prepared # to catch a NoColumnError: try: for record in db_manager.genSql(sql_str, (startstamp, stopstamp)): # Unpack the record timestamp, value, unit_system, interval = record if std_unit_system: if std_unit_system != unit_system: raise weewx.UnsupportedFeature("Unit type cannot change " "within an aggregation interval.") else: std_unit_system = unit_system start_vec.append(timestamp - interval * 60) stop_vec.append(timestamp) data_vec.append(value) except weedb.NoColumnError: # The sql type doesn't exist. Convert to an UnknownType error raise weewx.UnknownType(obs_type) unit, unit_group = weewx.units.getStandardUnitType(std_unit_system, obs_type, aggregate_type) return (ValueTuple(start_vec, 'unix_epoch', 'group_time'), ValueTuple(stop_vec, 'unix_epoch', 'group_time'), ValueTuple(data_vec, unit, unit_group)) # Set of SQL statements to be used for calculating aggregates from the main archive table. agg_sql_dict = { 'diff': "SELECT (b.%(obs_type)s - a.%(obs_type)s) FROM archive a, archive b " "WHERE b.dateTime = (SELECT MAX(dateTime) FROM archive " "WHERE dateTime <= %(stop)s) " "AND a.dateTime = (SELECT MIN(dateTime) FROM archive " "WHERE dateTime >= %(start)s);", 'first': "SELECT %(obs_type)s FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s " "AND %(obs_type)s IS NOT NULL ORDER BY dateTime ASC LIMIT 1", 'firsttime': "SELECT MIN(dateTime) FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s " "AND %(obs_type)s IS NOT NULL", 'last': "SELECT %(obs_type)s FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s " "AND %(obs_type)s IS NOT NULL ORDER BY dateTime DESC LIMIT 1", 'lasttime': "SELECT MAX(dateTime) FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s " "AND %(obs_type)s IS NOT NULL", 'maxtime': "SELECT dateTime FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s " "AND %(obs_type)s IS NOT NULL 
ORDER BY %(obs_type)s DESC LIMIT 1", 'mintime': "SELECT dateTime FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s " "AND %(obs_type)s IS NOT NULL ORDER BY %(obs_type)s ASC LIMIT 1", 'tderiv': "SELECT (b.%(obs_type)s - a.%(obs_type)s) / (b.dateTime-a.dateTime) " "FROM archive a, archive b " "WHERE b.dateTime = (SELECT MAX(dateTime) FROM archive " "WHERE dateTime <= %(stop)s) " "AND a.dateTime = (SELECT MIN(dateTime) FROM archive " "WHERE dateTime >= %(start)s);", } simple_agg_sql = "SELECT %(aggregate_type)s(%(obs_type)s) FROM %(table_name)s " \ "WHERE dateTime > %(start)s AND dateTime <= %(stop)s " \ "AND %(obs_type)s IS NOT NULL" @staticmethod def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict): """Returns an aggregation of an observation type over a given time period, using the main archive table. obs_type: The type over which aggregation is to be done (e.g., 'barometer', 'outTemp', 'rain', ...) timespan: An instance of weeutil.Timespan with the time period over which aggregation is to be done. aggregate_type: The type of aggregation to be done. db_manager: An instance of weewx.manager.Manager or subclass. option_dict: Not used in this version. 
returns: A ValueTuple containing the result.""" if aggregate_type not in ['sum', 'count', 'avg', 'max', 'min'] \ + list(ArchiveTable.agg_sql_dict.keys()): raise weewx.UnknownAggregation(aggregate_type) interpolate_dict = { 'aggregate_type': aggregate_type, 'obs_type': obs_type, 'table_name': db_manager.table_name, 'start': timespan.start, 'stop': timespan.stop } select_stmt = ArchiveTable.agg_sql_dict.get(aggregate_type, ArchiveTable.simple_agg_sql) % interpolate_dict try: row = db_manager.getSql(select_stmt) except weedb.NoColumnError: raise weewx.UnknownType(aggregate_type) value = row[0] if row else None # Look up the unit type and group of this combination of observation type and aggregation: u, g = weewx.units.getStandardUnitType(db_manager.std_unit_system, obs_type, aggregate_type) # Time derivatives have special rules. For example, the time derivative of watt-hours is # watts, scaled by the number of seconds in an hour. The unit group also changes to # group_power. if aggregate_type == 'tderiv': if u == 'watt_second': u = 'watt' elif u == 'watt_hour': u = 'watt' value *= 3600 elif u == 'kilowatt_hour': u = 'kilowatt' value *= 3600 g = 'group_power' # Form the ValueTuple and return it: return weewx.units.ValueTuple(value, u, g) # # ######################## Class DailySummaries ############################## # class DailySummaries(XType): """Calculate from the daily summaries.""" # Set of SQL statements to be used for calculating aggregates from the daily summaries. 
    # SQL templates for each supported aggregate, interpolated with the keys
    # 'table_name', 'obs_key', 'start', 'stop', and (for the *_ge/*_le types) 'val'.
    # Each runs against the per-observation daily summary table <table>_day_<obs>.
    daily_sql_dict = {
        'avg': "SELECT SUM(wsum),SUM(sumtime) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'count': "SELECT SUM(count) FROM %(table_name)s_day_%(obs_key)s "
                 "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'gustdir': "SELECT max_dir FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                   "ORDER BY max DESC, maxtime ASC LIMIT 1",
        'max': "SELECT MAX(max) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'max_ge': "SELECT SUM(max >= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'max_le': "SELECT SUM(max <= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'maxmin': "SELECT MAX(min) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'maxmintime': "SELECT mintime FROM %(table_name)s_day_%(obs_key)s "
                      "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                      "ORDER BY min DESC, mintime ASC LIMIT 1",
        'maxsum': "SELECT MAX(sum) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'maxsumtime': "SELECT maxtime FROM %(table_name)s_day_%(obs_key)s "
                      "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                      "ORDER BY sum DESC, maxtime ASC LIMIT 1",
        'maxtime': "SELECT maxtime FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                   "ORDER BY max DESC, maxtime ASC LIMIT 1",
        'meanmax': "SELECT AVG(max) FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'meanmin': "SELECT AVG(min) FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'min': "SELECT MIN(min) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'min_ge': "SELECT SUM(min >= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'min_le': "SELECT SUM(min <= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'minmax': "SELECT MIN(max) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        # NOTE(review): unlike its siblings, 'minmaxtime' has no "LIMIT 1". getSql()
        # reads only the first row so the result is unchanged, but the query does
        # more work than needed — consider adding LIMIT 1 for consistency.
        'minmaxtime': "SELECT maxtime FROM %(table_name)s_day_%(obs_key)s "
                      "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                      "ORDER BY max ASC, maxtime ASC ",
        'minsum': "SELECT MIN(sum) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'minsumtime': "SELECT mintime FROM %(table_name)s_day_%(obs_key)s "
                      "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                      "ORDER BY sum ASC, mintime ASC LIMIT 1",
        'mintime': "SELECT mintime FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                   "ORDER BY min ASC, mintime ASC LIMIT 1",
        'rms': "SELECT SUM(wsquaresum),SUM(sumtime) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'sum': "SELECT SUM(sum) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'sum_ge': "SELECT SUM(sum >= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'sum_le': "SELECT SUM(sum <= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'vecavg': "SELECT SUM(xsum),SUM(ysum),SUM(sumtime) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'vecdir': "SELECT SUM(xsum),SUM(ysum) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
    }

    @staticmethod
    def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict):
        """Returns an aggregation of a statistical type for a given time period,
        by using the daily summaries.

        obs_type: The type over which aggregation is to be done (e.g., 'barometer',
        'outTemp', 'rain', ...)

        timespan: An instance of weeutil.Timespan with the time period over which
        aggregation is to be done.

        aggregate_type: The type of aggregation to be done.

        db_manager: An instance of weewx.manager.Manager or subclass.

        option_dict: Not used in this version.

        returns: A ValueTuple containing the result."""

        # Check to see if this is a valid daily summary type:
        if not hasattr(db_manager, 'daykeys') or obs_type not in db_manager.daykeys:
            raise weewx.UnknownType(obs_type)

        aggregate_type = aggregate_type.lower()

        # Raise exception if we don't know about this type of aggregation
        if aggregate_type not in DailySummaries.daily_sql_dict:
            raise weewx.UnknownAggregation(aggregate_type)

        # We cannot use the day summaries if the starting and ending times of the aggregation
        # interval are not on midnight boundaries, and are not the first or last records in the
        # database.
        if db_manager.first_timestamp is None or db_manager.last_timestamp is None:
            raise weewx.UnknownAggregation(aggregate_type)
        if not (isStartOfDay(timespan.start) or timespan.start == db_manager.first_timestamp) \
                or not (isStartOfDay(timespan.stop) or timespan.stop == db_manager.last_timestamp):
            raise weewx.UnknownAggregation(aggregate_type)

        val = option_dict.get('val')
        if val is None:
            target_val = None
        else:
            # The following is for backwards compatibility when ValueTuples had
            # just two members. This hack avoids breaking old skins.
            if len(val) == 2:
                # Infer the missing unit group from the unit name.
                if val[1] in ['degree_F', 'degree_C']:
                    val += ("group_temperature",)
                elif val[1] in ['inch', 'mm', 'cm']:
                    val += ("group_rain",)
            target_val = weewx.units.convertStd(val, db_manager.std_unit_system)[0]

        # Form the interpolation dictionary
        inter_dict = {
            'start': weeutil.weeutil.startOfDay(timespan.start),
            'stop': timespan.stop,
            'obs_key': obs_type,
            'aggregate_type': aggregate_type,
            'val': target_val,
            'table_name': db_manager.table_name
        }

        # Run the query against the database:
        row = db_manager.getSql(DailySummaries.daily_sql_dict[aggregate_type] % inter_dict)

        # Each aggregation type requires a slightly different calculation.
        if not row or None in row:
            # If no row was returned, or if it contains any nulls (meaning that not
            # all required data was available to calculate the requested aggregate),
            # then set the resulting value to None.
            value = None

        elif aggregate_type in ['min', 'maxmin', 'max', 'minmax', 'meanmin', 'meanmax',
                                'maxsum', 'minsum', 'sum', 'gustdir']:
            # These aggregates are passed through 'as is'.
            value = row[0]

        elif aggregate_type in ['mintime', 'maxmintime', 'maxtime', 'minmaxtime', 'maxsumtime',
                                'minsumtime', 'count', 'max_ge', 'max_le', 'min_ge', 'min_le',
                                'sum_ge', 'sum_le']:
            # These aggregates are always integers:
            value = int(row[0])

        elif aggregate_type == 'avg':
            value = row[0] / row[1] if row[1] else None

        elif aggregate_type == 'rms':
            value = math.sqrt(row[0] / row[1]) if row[1] else None

        elif aggregate_type == 'vecavg':
            value = math.sqrt((row[0] ** 2 + row[1] ** 2) / row[2] ** 2) if row[2] else None

        elif aggregate_type == 'vecdir':
            # An all-zero vector sum has no defined direction.
            if row == (0.0, 0.0):
                value = None
            else:
                # Convert from math convention (CCW from east) to compass (CW from north),
                # then normalize into [0, 360).
                deg = 90.0 - math.degrees(math.atan2(row[1], row[0]))
                value = deg if deg >= 0 else deg + 360.0
        else:
            # Unknown aggregation. Should not have gotten this far...
            raise ValueError("Unexpected error. Aggregate type '%s'" % aggregate_type)

        # Look up the unit type and group of this combination of observation type and aggregation:
        t, g = weewx.units.getStandardUnitType(db_manager.std_unit_system, obs_type,
                                               aggregate_type)
        # Form the ValueTuple and return it:
        return weewx.units.ValueTuple(value, t, g)


#
# ######################## Class AggregateHeatCool ##############################
#

class AggregateHeatCool(XType):
    """Calculate heating and cooling degree-days."""

    # Default base temperature and unit type for heating and cooling degree days,
    # as a value tuple
    default_heatbase = (65.0, "degree_F", "group_temperature")
    default_coolbase = (65.0, "degree_F", "group_temperature")
    default_growbase = (50.0, "degree_F", "group_temperature")

    @staticmethod
    def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict):
        """Returns heating and cooling degree days over a time period.

        obs_type: The type over which aggregation is to be done. Must be one of 'heatdeg',
        'cooldeg', or 'growdeg'.

        timespan: An instance of weeutil.Timespan with the time period over which aggregation
        is to be done.

        aggregate_type: The type of aggregation to be done. Must be 'avg' or 'sum'.

        db_manager: An instance of weewx.manager.Manager or subclass.

        option_dict: Not used in this version.

        returns: A ValueTuple containing the result.
""" # Check to see whether heating or cooling degree days are being asked for: if obs_type not in ['heatdeg', 'cooldeg', 'growdeg']: raise weewx.UnknownType(obs_type) # Only summation (total) or average heating or cooling degree days is supported: if aggregate_type not in ['sum', 'avg']: raise weewx.UnknownAggregation(aggregate_type) # Get the base for heating and cooling degree-days units_dict = option_dict.get('skin_dict', {}).get('Units', {}) dd_dict = units_dict.get('DegreeDays', {}) heatbase = dd_dict.get('heating_base', AggregateHeatCool.default_heatbase) coolbase = dd_dict.get('cooling_base', AggregateHeatCool.default_coolbase) growbase = dd_dict.get('growing_base', AggregateHeatCool.default_growbase) # Convert to a ValueTuple in the same unit system as the database heatbase_t = weewx.units.convertStd((float(heatbase[0]), heatbase[1], "group_temperature"), db_manager.std_unit_system) coolbase_t = weewx.units.convertStd((float(coolbase[0]), coolbase[1], "group_temperature"), db_manager.std_unit_system) growbase_t = weewx.units.convertStd((float(growbase[0]), growbase[1], "group_temperature"), db_manager.std_unit_system) total = 0.0 count = 0 for daySpan in weeutil.weeutil.genDaySpans(timespan.start, timespan.stop): # Get the average temperature for the day as a value tuple: Tavg_t = DailySummaries.get_aggregate('outTemp', daySpan, 'avg', db_manager) # Make sure it's valid before including it in the aggregation: if Tavg_t is not None and Tavg_t[0] is not None: if obs_type == 'heatdeg': total += weewx.wxformulas.heating_degrees(Tavg_t[0], heatbase_t[0]) elif obs_type == 'cooldeg': total += weewx.wxformulas.cooling_degrees(Tavg_t[0], coolbase_t[0]) else: total += weewx.wxformulas.cooling_degrees(Tavg_t[0], growbase_t[0]) count += 1 if aggregate_type == 'sum': value = total else: value = total / count if count else None # Look up the unit type and group of the result: t, g = weewx.units.getStandardUnitType(db_manager.std_unit_system, obs_type, aggregate_type) # 
Return as a value tuple return weewx.units.ValueTuple(value, t, g) # ############################# WindVec extensions ######################################### class WindVec(XType): """Extensions for calculating special observation types 'windvec' and 'windgustvec'. It provides functions for calculating series, and for calculating aggregates. """ windvec_types = { 'windvec': ('windSpeed', 'windDir'), 'windgustvec': ('windGust', 'windGustDir') } agg_sql_dict = { 'count': "SELECT COUNT(dateTime), usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL)", 'first': "SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL " "ORDER BY dateTime ASC LIMIT 1", 'last': "SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL " "ORDER BY dateTime DESC LIMIT 1", 'min': "SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL " "ORDER BY %(mag)s ASC LIMIT 1;", 'max': "SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL " "ORDER BY %(mag)s DESC LIMIT 1;", } # for types 'avg', 'sum' complex_sql_wind = 'SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s WHERE dateTime > ? ' \ 'AND dateTime <= ?' @staticmethod def get_series(obs_type, timespan, db_manager, aggregate_type=None, aggregate_interval=None): """Get a series, possibly with aggregation, for special 'wind vector' types. These are typically used for the wind vector plots. """ # Check to see if the requested type is not 'windvec' or 'windgustvec' if obs_type not in WindVec.windvec_types: # The type is not one of the extended wind types. We can't handle it. raise weewx.UnknownType(obs_type) # It is an extended wind type. 
Prepare the lists that will hold the
        # final results.
        start_vec = list()
        stop_vec = list()
        data_vec = list()

        # Is aggregation requested?
        if aggregate_type:
            # Yes. Just use the regular series function. When it comes time to do the aggregation,
            # the specialized function WindVec.get_aggregate() (defined below), will be used.
            return ArchiveTable.get_series(obs_type, timespan, db_manager, aggregate_type,
                                           aggregate_interval)

        else:
            # No aggregation desired. However, we will have to assemble the wind vector from
            # its flattened types. This SQL select string will select the proper wind types
            sql_str = 'SELECT dateTime, %s, %s, usUnits, `interval` FROM %s ' \
                      'WHERE dateTime >= ? AND dateTime <= ?' \
                      % (WindVec.windvec_types[obs_type][0], WindVec.windvec_types[obs_type][1],
                         db_manager.table_name)
            std_unit_system = None

            for record in db_manager.genSql(sql_str, timespan):
                ts, magnitude, direction, unit_system, interval = record
                # The unit system must be uniform over the whole series.
                if std_unit_system:
                    if std_unit_system != unit_system:
                        raise weewx.UnsupportedFeature(
                            "Unit type cannot change within a time interval.")
                else:
                    std_unit_system = unit_system

                # Combine magnitude and direction into a single complex value.
                value = weeutil.weeutil.to_complex(magnitude, direction)

                # The record covers the `interval` minutes ending at its timestamp.
                start_vec.append(ts - interval * 60)
                stop_vec.append(ts)
                data_vec.append(value)

            # aggregate_type is falsy on this path; getStandardUnitType accepts that.
            unit, unit_group = weewx.units.getStandardUnitType(std_unit_system, obs_type,
                                                               aggregate_type)
            return (ValueTuple(start_vec, 'unix_epoch', 'group_time'),
                    ValueTuple(stop_vec, 'unix_epoch', 'group_time'),
                    ValueTuple(data_vec, unit, unit_group))

    @staticmethod
    def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict):
        """Returns an aggregation of a wind vector type over a timespan by using the main archive
        table.

        obs_type: The type over which aggregation is to be done. For this function, it must be
        'windvec' or 'windgustvec'. Anything else will cause weewx.UnknownType to be raised.

        timespan: An instance of weeutil.Timespan with the time period over which aggregation is
        to be done.

        aggregate_type: The type of aggregation to be done. For this function, must be 'avg',
        'sum', 'count', 'first', 'last', 'min', or 'max'. Anything else will cause
        weewx.UnknownAggregation to be raised.

        db_manager: An instance of weewx.manager.Manager or subclass.

        option_dict: Not used in this version.

        returns: A ValueTuple containing the result. Note that the value contained in the
        ValueTuple will be a complex number.
        """
        if obs_type not in WindVec.windvec_types:
            raise weewx.UnknownType(obs_type)

        aggregate_type = aggregate_type.lower()

        # Raise exception if we don't know about this type of aggregation
        if aggregate_type not in ['avg', 'sum'] + list(WindVec.agg_sql_dict.keys()):
            raise weewx.UnknownAggregation(aggregate_type)

        # Form the interpolation dictionary
        interpolation_dict = {
            'dir': WindVec.windvec_types[obs_type][1],
            'mag': WindVec.windvec_types[obs_type][0],
            'start': timespan.start,
            'stop': timespan.stop,
            'table_name': db_manager.table_name
        }

        if aggregate_type in WindVec.agg_sql_dict:
            # For these types (e.g., first, last, etc.), we can do the aggregation in a SELECT
            # statement.
            select_stmt = WindVec.agg_sql_dict[aggregate_type] % interpolation_dict
            row = db_manager.getSql(select_stmt)
            if row:
                if aggregate_type == 'count':
                    value, std_unit_system = row
                else:
                    magnitude, direction, std_unit_system = row
                    value = weeutil.weeutil.to_complex(magnitude, direction)
            else:
                std_unit_system = db_manager.std_unit_system
                value = None
        else:
            # The result is more complex, requiring vector arithmetic.
We will have to do it
            # in Python
            std_unit_system = None
            xsum = ysum = 0.0
            count = 0
            select_stmt = WindVec.complex_sql_wind % interpolation_dict

            for rec in db_manager.genSql(select_stmt, timespan):
                # Unpack the record
                mag, direction, unit_system = rec

                # Ignore rows where magnitude is NULL
                if mag is None:
                    continue

                # A good direction is necessary unless the mag is zero:
                if mag == 0.0 or direction is not None:
                    if std_unit_system:
                        if std_unit_system != unit_system:
                            raise weewx.UnsupportedFeature(
                                "Unit type cannot change within a time interval.")
                    else:
                        std_unit_system = unit_system

                    # An undefined direction is OK (and expected) if the magnitude
                    # is zero. But, in that case, it doesn't contribute to the sums either.
                    if direction is None:
                        # Sanity check
                        if weewx.debug:
                            assert (mag == 0.0)
                    else:
                        # Accumulate x/y components, converting compass bearing to math angle.
                        xsum += mag * math.cos(math.radians(90.0 - direction))
                        ysum += mag * math.sin(math.radians(90.0 - direction))
                    count += 1

            # We've gone through the whole interval. Were there any good data?
            if count:
                # Form the requested aggregation:
                if aggregate_type == 'sum':
                    value = complex(xsum, ysum)
                else:
                    # Must be 'avg'
                    value = complex(xsum, ysum) / count
            else:
                value = None

        # Look up the unit type and group of this combination of observation type and aggregation:
        t, g = weewx.units.getStandardUnitType(std_unit_system, obs_type, aggregate_type)
        # Form the ValueTuple and return it:
        return weewx.units.ValueTuple(value, t, g)


class WindVecDaily(XType):
    """Extension for calculating the average windvec, using the daily summaries."""

    @staticmethod
    def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict):
        """Optimization for calculating 'avg' aggregations for type 'windvec'. The
        timespan must be on a daily boundary."""

        # We can only do observation type 'windvec'
        if obs_type != 'windvec':
            # We can't handle it.
            raise weewx.UnknownType(obs_type)

        # We can only do 'avg'
        if aggregate_type != 'avg':
            raise weewx.UnknownAggregation(aggregate_type)

        # We cannot use the day summaries if the starting and ending times of the aggregation
        # interval are not on midnight boundaries, and are not the first or last records in the
        # database.
        if not (isStartOfDay(timespan.start) or timespan.start == db_manager.first_timestamp) \
                or not (isStartOfDay(timespan.stop) or timespan.stop == db_manager.last_timestamp):
            raise weewx.UnknownAggregation(aggregate_type)

        sql = 'SELECT SUM(xsum), SUM(ysum), SUM(dirsumtime) ' \
              'FROM %s_day_wind WHERE dateTime>=? AND dateTime<?;' % db_manager.table_name

        row = db_manager.getSql(sql, timespan)

        if not row or None in row or not row[2]:
            # If no row was returned, or if it contains any nulls (meaning that not
            # all required data was available to calculate the requested aggregate),
            # then set the resulting value to None.
            value = None
        else:
            # Time-weighted vector average as a complex number.
            value = complex(row[0], row[1]) / row[2]

        # Look up the unit type and group of the result:
        t, g = weewx.units.getStandardUnitType(db_manager.std_unit_system, obs_type,
                                               aggregate_type)
        # Return as a value tuple
        return weewx.units.ValueTuple(value, t, g)


class XTypeTable(XType):
    """Calculate a series for an xtype. An xtype may not necessarily be in the database, so
    this version calculates it on the fly. Note: this version only works if no aggregation
    has been requested."""

    @staticmethod
    def get_series(obs_type, timespan, db_manager, aggregate_type=None, aggregate_interval=None):
        """Get a series of an xtype, by using the main archive table. Works only for no
        aggregation. """

        start_vec = list()
        stop_vec = list()
        data_vec = list()

        if aggregate_type:
            # This version does not know how to do aggregations, although this could be
            # added in the future.
            raise weewx.UnknownAggregation(aggregate_type)
        else:
            # No aggregation
            std_unit_system = None

            # Hit the database.
            for record in db_manager.genBatchRecords(*timespan):
                if std_unit_system:
                    if std_unit_system != record['usUnits']:
                        raise weewx.UnsupportedFeature("Unit system cannot change "
                                                       "within a series.")
                else:
                    std_unit_system = record['usUnits']

                # Given a record, use the xtypes system to calculate a value:
                value = get_scalar(obs_type, record, db_manager)
                start_vec.append(record['dateTime']- record['interval'] * 60)
                stop_vec.append(record['dateTime'])
                data_vec.append(value[0])

            # `unit`/`unit_group` are bound only on this branch; the `if` branch raises,
            # so they are always defined at the return below.
            unit, unit_group = weewx.units.getStandardUnitType(std_unit_system, obs_type)

        return (ValueTuple(start_vec, 'unix_epoch', 'group_time'),
                ValueTuple(stop_vec, 'unix_epoch', 'group_time'),
                ValueTuple(data_vec, unit, unit_group))


# Add instantiated versions to the extension list. Order matters. We want the highly-specialized
# versions first, because they might offer optimizations.
xtypes.append(WindVecDaily())
xtypes.append(WindVec())
xtypes.append(AggregateHeatCool())
xtypes.append(DailySummaries())
xtypes.append(ArchiveTable())
xtypes.append(XTypeTable())
#
#    Copyright (c) 2019-2020 <NAME> <<EMAIL>>
#
#    See the file LICENSE.txt for your full rights.
#
"""User-defined extensions to the WeeWX type system"""

import math

import weedb
import weeutil.weeutil
import weewx
import weewx.units
import weewx.wxformulas
from weeutil.weeutil import isStartOfDay
from weewx.units import ValueTuple

# A list holding the type extensions. Each entry should be a subclass of XType, defined below.
xtypes = []


class XType(object):
    """Base class for extensions to the WeeWX type system."""

    def get_scalar(self, obs_type, record, db_manager=None):
        """Calculate a scalar. Specializing versions should raise...

        - an exception of type `weewx.UnknownType`, if the type `obs_type` is unknown to the
          function.
        - an exception of type `weewx.CannotCalculate` if the type is known to the function, but
          all the information necessary to calculate the type is not there.
        """
        # Raising the exception class (rather than an instance) is valid Python shorthand.
        raise weewx.UnknownType

    def get_series(self, obs_type, timespan, db_manager, aggregate_type=None,
                   aggregate_interval=None):
        """Calculate a series, possibly with aggregation. Specializing versions should raise...

        - an exception of type `weewx.UnknownType`, if the type `obs_type` is unknown to the
          function.
        - an exception of type `weewx.CannotCalculate` if the type is known to the function, but
          all the information necessary to calculate the series is not there.
        """
        raise weewx.UnknownType

    def get_aggregate(self, obs_type, timespan, aggregate_type, db_manager, **option_dict):
        """Calculate an aggregation. Specializing versions should raise...

        - an exception of type `weewx.UnknownType`, if the type `obs_type` is unknown to the
          function.
        - an exception of type `weewx.UnknownAggregation` if the aggregation type
          `aggregate_type` is unknown to the function.
        - an exception of type `weewx.CannotCalculate` if the type is known to the function, but
          all the information necessary to calculate the type is not there.
        """
        raise weewx.UnknownAggregation

    def shut_down(self):
        """Opportunity to do any clean up."""
        pass


# ##################### Retrieval functions ###########################

def get_scalar(obs_type, record, db_manager=None):
    """Return a scalar value"""
    # Search the list, looking for a get_scalar() method that does not raise an exception
    for xtype in xtypes:
        try:
            # Try this function. It will raise an exception if it does not know about the type.
            return xtype.get_scalar(obs_type, record, db_manager)
        except weewx.UnknownType:
            # This function does not know about the type. Move on to the next one.
            pass
    # None of the functions worked.
    raise weewx.UnknownType(obs_type)


def get_series(obs_type, timespan, db_manager, aggregate_type=None, aggregate_interval=None):
    """Return a series (aka vector) of, possibly aggregated, values."""
    # Search the list, looking for a get_series() method that does not raise an exception
    for xtype in xtypes:
        try:
            # Try this function. It will raise an exception if it does not know about the type.
            return xtype.get_series(obs_type, timespan, db_manager, aggregate_type,
                                    aggregate_interval)
        except weewx.UnknownType:
            # This function does not know about the type. Move on to the next one.
            pass
    # None of the functions worked.
    raise weewx.UnknownType(obs_type)


def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict):
    """Calculate an aggregation over a timespan"""
    # Search the list, looking for a get_aggregate() method that does not raise an exception
    for xtype in xtypes:
        try:
            # Try this function. It will raise an exception if it doesn't know about the type of
            # aggregation.
            return xtype.get_aggregate(obs_type, timespan, aggregate_type, db_manager,
                                       **option_dict)
        except (weewx.UnknownAggregation, weewx.UnknownType):
            # This function can't do it. Move on to the next one.
            pass
    raise weewx.UnknownAggregation("%s('%s')" % (aggregate_type, obs_type))


#
# ######################## Class ArchiveTable ##############################
#

class ArchiveTable(XType):
    """Calculate types and aggregates directly from the archive table"""

    @staticmethod
    def get_series(obs_type, timespan, db_manager, aggregate_type=None, aggregate_interval=None):
        """Get a series, possibly with aggregation, from the main archive database.

        The general strategy is that if aggregation is asked for, chop the series up into
        separate chunks, calculating the aggregate for each chunk. Then assemble the results.

        If no aggregation is called for, just return the data directly out of the database.
        """

        startstamp, stopstamp = timespan
        start_vec = list()
        stop_vec = list()
        data_vec = list()

        if aggregate_type:
            # With aggregation
            unit, unit_group = None, None
            if aggregate_type == 'cumulative':
                # 'cumulative' is implemented as a running total of per-chunk sums.
                do_aggregate = 'sum'
                total = 0
            else:
                do_aggregate = aggregate_type
            for stamp in weeutil.weeutil.intervalgen(startstamp, stopstamp, aggregate_interval):
                # Get the aggregate as a ValueTuple
                agg_vt = get_aggregate(obs_type, stamp, do_aggregate, db_manager)
                if unit:
                    # It's OK if the unit is unknown (=None).
                    if agg_vt[1] is not None and (unit != agg_vt[1] or unit_group != agg_vt[2]):
                        raise weewx.UnsupportedFeature("Cannot change unit groups "
                                                       "within an aggregation.")
                else:
                    unit, unit_group = agg_vt[1], agg_vt[2]
                start_vec.append(stamp.start)
                stop_vec.append(stamp.stop)
                if aggregate_type == 'cumulative':
                    if agg_vt[0] is not None:
                        total += agg_vt[0]
                    data_vec.append(total)
                else:
                    data_vec.append(agg_vt[0])
        else:
            # No aggregation
            sql_str = "SELECT dateTime, %s, usUnits, `interval` FROM %s " \
                      "WHERE dateTime >= ? AND dateTime <= ?" % (obs_type, db_manager.table_name)
            std_unit_system = None

            # Hit the database.
It's possible the type is not in the database, so be prepared
            # to catch a NoColumnError:
            try:
                for record in db_manager.genSql(sql_str, (startstamp, stopstamp)):
                    # Unpack the record
                    timestamp, value, unit_system, interval = record
                    if std_unit_system:
                        if std_unit_system != unit_system:
                            raise weewx.UnsupportedFeature("Unit type cannot change "
                                                           "within an aggregation interval.")
                    else:
                        std_unit_system = unit_system
                    start_vec.append(timestamp - interval * 60)
                    stop_vec.append(timestamp)
                    data_vec.append(value)
            except weedb.NoColumnError:
                # The sql type doesn't exist. Convert to an UnknownType error
                raise weewx.UnknownType(obs_type)

            unit, unit_group = weewx.units.getStandardUnitType(std_unit_system, obs_type,
                                                               aggregate_type)

        return (ValueTuple(start_vec, 'unix_epoch', 'group_time'),
                ValueTuple(stop_vec, 'unix_epoch', 'group_time'),
                ValueTuple(data_vec, unit, unit_group))

    # Set of SQL statements to be used for calculating aggregates from the main archive table.
    # NOTE(review): 'diff' (and 'tderiv' below) hard-code the table name "archive" instead of
    # using %(table_name)s like the other entries — they will fail against a non-default
    # table name. Consider parameterizing them for consistency.
    agg_sql_dict = {
        'diff': "SELECT (b.%(obs_type)s - a.%(obs_type)s) FROM archive a, archive b "
                "WHERE b.dateTime = (SELECT MAX(dateTime) FROM archive "
                "WHERE dateTime <= %(stop)s) "
                "AND a.dateTime = (SELECT MIN(dateTime) FROM archive "
                "WHERE dateTime >= %(start)s);",
        'first': "SELECT %(obs_type)s FROM %(table_name)s "
                 "WHERE dateTime > %(start)s AND dateTime <= %(stop)s "
                 "AND %(obs_type)s IS NOT NULL ORDER BY dateTime ASC LIMIT 1",
        'firsttime': "SELECT MIN(dateTime) FROM %(table_name)s "
                     "WHERE dateTime > %(start)s AND dateTime <= %(stop)s "
                     "AND %(obs_type)s IS NOT NULL",
        'last': "SELECT %(obs_type)s FROM %(table_name)s "
                "WHERE dateTime > %(start)s AND dateTime <= %(stop)s "
                "AND %(obs_type)s IS NOT NULL ORDER BY dateTime DESC LIMIT 1",
        'lasttime': "SELECT MAX(dateTime) FROM %(table_name)s "
                    "WHERE dateTime > %(start)s AND dateTime <= %(stop)s "
                    "AND %(obs_type)s IS NOT NULL",
        'maxtime': "SELECT dateTime FROM %(table_name)s "
                   "WHERE dateTime > %(start)s AND dateTime <= %(stop)s "
                   "AND %(obs_type)s IS NOT NULL 
ORDER BY %(obs_type)s DESC LIMIT 1",
        'mintime': "SELECT dateTime FROM %(table_name)s "
                   "WHERE dateTime > %(start)s AND dateTime <= %(stop)s "
                   "AND %(obs_type)s IS NOT NULL ORDER BY %(obs_type)s ASC LIMIT 1",
        'tderiv': "SELECT (b.%(obs_type)s - a.%(obs_type)s) / (b.dateTime-a.dateTime) "
                  "FROM archive a, archive b "
                  "WHERE b.dateTime = (SELECT MAX(dateTime) FROM archive "
                  "WHERE dateTime <= %(stop)s) "
                  "AND a.dateTime = (SELECT MIN(dateTime) FROM archive "
                  "WHERE dateTime >= %(start)s);",
    }

    # Fallback template for the simple SQL aggregates (sum, count, avg, max, min).
    simple_agg_sql = "SELECT %(aggregate_type)s(%(obs_type)s) FROM %(table_name)s " \
                     "WHERE dateTime > %(start)s AND dateTime <= %(stop)s " \
                     "AND %(obs_type)s IS NOT NULL"

    @staticmethod
    def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict):
        """Returns an aggregation of an observation type over a given time period, using the
        main archive table.

        obs_type: The type over which aggregation is to be done (e.g., 'barometer', 'outTemp',
        'rain', ...)

        timespan: An instance of weeutil.Timespan with the time period over which aggregation
        is to be done.

        aggregate_type: The type of aggregation to be done.

        db_manager: An instance of weewx.manager.Manager or subclass.

        option_dict: Not used in this version.

        returns: A ValueTuple containing the result."""

        if aggregate_type not in ['sum', 'count', 'avg', 'max', 'min'] \
                + list(ArchiveTable.agg_sql_dict.keys()):
            raise weewx.UnknownAggregation(aggregate_type)

        interpolate_dict = {
            'aggregate_type': aggregate_type,
            'obs_type': obs_type,
            'table_name': db_manager.table_name,
            'start': timespan.start,
            'stop': timespan.stop
        }

        select_stmt = ArchiveTable.agg_sql_dict.get(aggregate_type,
                                                    ArchiveTable.simple_agg_sql) % interpolate_dict
        try:
            row = db_manager.getSql(select_stmt)
        except weedb.NoColumnError:
            # NOTE(review): the missing column here is the observation type, so
            # UnknownType(obs_type) looks like the intended argument rather than
            # aggregate_type — verify against upstream before changing.
            raise weewx.UnknownType(aggregate_type)

        value = row[0] if row else None

        # Look up the unit type and group of this combination of observation type and aggregation:
        u, g = weewx.units.getStandardUnitType(db_manager.std_unit_system, obs_type,
                                               aggregate_type)

        # Time derivatives have special rules. For example, the time derivative of watt-hours is
        # watts, scaled by the number of seconds in an hour. The unit group also changes to
        # group_power.
        if aggregate_type == 'tderiv':
            if u == 'watt_second':
                u = 'watt'
            elif u == 'watt_hour':
                u = 'watt'
                value *= 3600
            elif u == 'kilowatt_hour':
                u = 'kilowatt'
                value *= 3600
            g = 'group_power'

        # Form the ValueTuple and return it:
        return weewx.units.ValueTuple(value, u, g)


#
# ######################## Class DailySummaries ##############################
#

class DailySummaries(XType):
    """Calculate from the daily summaries."""

    # Set of SQL statements to be used for calculating aggregates from the daily summaries.
    # SQL templates for each supported aggregate, interpolated with the keys
    # 'table_name', 'obs_key', 'start', 'stop', and (for the *_ge/*_le types) 'val'.
    # Each runs against the per-observation daily summary table <table>_day_<obs>.
    daily_sql_dict = {
        'avg': "SELECT SUM(wsum),SUM(sumtime) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'count': "SELECT SUM(count) FROM %(table_name)s_day_%(obs_key)s "
                 "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'gustdir': "SELECT max_dir FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                   "ORDER BY max DESC, maxtime ASC LIMIT 1",
        'max': "SELECT MAX(max) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'max_ge': "SELECT SUM(max >= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'max_le': "SELECT SUM(max <= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'maxmin': "SELECT MAX(min) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'maxmintime': "SELECT mintime FROM %(table_name)s_day_%(obs_key)s "
                      "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                      "ORDER BY min DESC, mintime ASC LIMIT 1",
        'maxsum': "SELECT MAX(sum) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'maxsumtime': "SELECT maxtime FROM %(table_name)s_day_%(obs_key)s "
                      "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                      "ORDER BY sum DESC, maxtime ASC LIMIT 1",
        'maxtime': "SELECT maxtime FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                   "ORDER BY max DESC, maxtime ASC LIMIT 1",
        'meanmax': "SELECT AVG(max) FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'meanmin': "SELECT AVG(min) FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'min': "SELECT MIN(min) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'min_ge': "SELECT SUM(min >= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'min_le': "SELECT SUM(min <= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'minmax': "SELECT MIN(max) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        # NOTE(review): unlike its siblings, 'minmaxtime' has no "LIMIT 1". getSql()
        # reads only the first row so the result is unchanged, but the query does
        # more work than needed — consider adding LIMIT 1 for consistency.
        'minmaxtime': "SELECT maxtime FROM %(table_name)s_day_%(obs_key)s "
                      "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                      "ORDER BY max ASC, maxtime ASC ",
        'minsum': "SELECT MIN(sum) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'minsumtime': "SELECT mintime FROM %(table_name)s_day_%(obs_key)s "
                      "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                      "ORDER BY sum ASC, mintime ASC LIMIT 1",
        'mintime': "SELECT mintime FROM %(table_name)s_day_%(obs_key)s "
                   "WHERE dateTime >= %(start)s AND dateTime < %(stop)s "
                   "ORDER BY min ASC, mintime ASC LIMIT 1",
        'rms': "SELECT SUM(wsquaresum),SUM(sumtime) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'sum': "SELECT SUM(sum) FROM %(table_name)s_day_%(obs_key)s "
               "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'sum_ge': "SELECT SUM(sum >= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'sum_le': "SELECT SUM(sum <= %(val)s) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'vecavg': "SELECT SUM(xsum),SUM(ysum),SUM(sumtime) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
        'vecdir': "SELECT SUM(xsum),SUM(ysum) FROM %(table_name)s_day_%(obs_key)s "
                  "WHERE dateTime >= %(start)s AND dateTime < %(stop)s",
    }

    @staticmethod
    def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict):
        """Returns an aggregation of a statistical type for a given time period,
        by using the daily summaries.

        obs_type: The type over which aggregation is to be done (e.g., 'barometer',
        'outTemp', 'rain', ...)

        timespan: An instance of weeutil.Timespan with the time period over which
        aggregation is to be done.

        aggregate_type: The type of aggregation to be done.

        db_manager: An instance of weewx.manager.Manager or subclass.

        option_dict: Not used in this version.

        returns: A ValueTuple containing the result."""

        # Check to see if this is a valid daily summary type:
        if not hasattr(db_manager, 'daykeys') or obs_type not in db_manager.daykeys:
            raise weewx.UnknownType(obs_type)

        aggregate_type = aggregate_type.lower()

        # Raise exception if we don't know about this type of aggregation
        if aggregate_type not in DailySummaries.daily_sql_dict:
            raise weewx.UnknownAggregation(aggregate_type)

        # We cannot use the day summaries if the starting and ending times of the aggregation
        # interval are not on midnight boundaries, and are not the first or last records in the
        # database.
        if db_manager.first_timestamp is None or db_manager.last_timestamp is None:
            raise weewx.UnknownAggregation(aggregate_type)
        if not (isStartOfDay(timespan.start) or timespan.start == db_manager.first_timestamp) \
                or not (isStartOfDay(timespan.stop) or timespan.stop == db_manager.last_timestamp):
            raise weewx.UnknownAggregation(aggregate_type)

        val = option_dict.get('val')
        if val is None:
            target_val = None
        else:
            # The following is for backwards compatibility when ValueTuples had
            # just two members. This hack avoids breaking old skins.
if len(val) == 2: if val[1] in ['degree_F', 'degree_C']: val += ("group_temperature",) elif val[1] in ['inch', 'mm', 'cm']: val += ("group_rain",) target_val = weewx.units.convertStd(val, db_manager.std_unit_system)[0] # Form the interpolation dictionary inter_dict = { 'start': weeutil.weeutil.startOfDay(timespan.start), 'stop': timespan.stop, 'obs_key': obs_type, 'aggregate_type': aggregate_type, 'val': target_val, 'table_name': db_manager.table_name } # Run the query against the database: row = db_manager.getSql(DailySummaries.daily_sql_dict[aggregate_type] % inter_dict) # Each aggregation type requires a slightly different calculation. if not row or None in row: # If no row was returned, or if it contains any nulls (meaning that not # all required data was available to calculate the requested aggregate), # then set the resulting value to None. value = None elif aggregate_type in ['min', 'maxmin', 'max', 'minmax', 'meanmin', 'meanmax', 'maxsum', 'minsum', 'sum', 'gustdir']: # These aggregates are passed through 'as is'. value = row[0] elif aggregate_type in ['mintime', 'maxmintime', 'maxtime', 'minmaxtime', 'maxsumtime', 'minsumtime', 'count', 'max_ge', 'max_le', 'min_ge', 'min_le', 'sum_ge', 'sum_le']: # These aggregates are always integers: value = int(row[0]) elif aggregate_type == 'avg': value = row[0] / row[1] if row[1] else None elif aggregate_type == 'rms': value = math.sqrt(row[0] / row[1]) if row[1] else None elif aggregate_type == 'vecavg': value = math.sqrt((row[0] ** 2 + row[1] ** 2) / row[2] ** 2) if row[2] else None elif aggregate_type == 'vecdir': if row == (0.0, 0.0): value = None else: deg = 90.0 - math.degrees(math.atan2(row[1], row[0])) value = deg if deg >= 0 else deg + 360.0 else: # Unknown aggregation. Should not have gotten this far... raise ValueError("Unexpected error. 
Aggregate type '%s'" % aggregate_type) # Look up the unit type and group of this combination of observation type and aggregation: t, g = weewx.units.getStandardUnitType(db_manager.std_unit_system, obs_type, aggregate_type) # Form the ValueTuple and return it: return weewx.units.ValueTuple(value, t, g) # # ######################## Class AggregateHeatCool ############################## # class AggregateHeatCool(XType): """Calculate heating and cooling degree-days.""" # Default base temperature and unit type for heating and cooling degree days, # as a value tuple default_heatbase = (65.0, "degree_F", "group_temperature") default_coolbase = (65.0, "degree_F", "group_temperature") default_growbase = (50.0, "degree_F", "group_temperature") @staticmethod def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict): """Returns heating and cooling degree days over a time period. obs_type: The type over which aggregation is to be done. Must be one of 'heatdeg', 'cooldeg', or 'growdeg'. timespan: An instance of weeutil.Timespan with the time period over which aggregation is to be done. aggregate_type: The type of aggregation to be done. Must be 'avg' or 'sum'. db_manager: An instance of weewx.manager.Manager or subclass. option_dict: Not used in this version. returns: A ValueTuple containing the result. 
""" # Check to see whether heating or cooling degree days are being asked for: if obs_type not in ['heatdeg', 'cooldeg', 'growdeg']: raise weewx.UnknownType(obs_type) # Only summation (total) or average heating or cooling degree days is supported: if aggregate_type not in ['sum', 'avg']: raise weewx.UnknownAggregation(aggregate_type) # Get the base for heating and cooling degree-days units_dict = option_dict.get('skin_dict', {}).get('Units', {}) dd_dict = units_dict.get('DegreeDays', {}) heatbase = dd_dict.get('heating_base', AggregateHeatCool.default_heatbase) coolbase = dd_dict.get('cooling_base', AggregateHeatCool.default_coolbase) growbase = dd_dict.get('growing_base', AggregateHeatCool.default_growbase) # Convert to a ValueTuple in the same unit system as the database heatbase_t = weewx.units.convertStd((float(heatbase[0]), heatbase[1], "group_temperature"), db_manager.std_unit_system) coolbase_t = weewx.units.convertStd((float(coolbase[0]), coolbase[1], "group_temperature"), db_manager.std_unit_system) growbase_t = weewx.units.convertStd((float(growbase[0]), growbase[1], "group_temperature"), db_manager.std_unit_system) total = 0.0 count = 0 for daySpan in weeutil.weeutil.genDaySpans(timespan.start, timespan.stop): # Get the average temperature for the day as a value tuple: Tavg_t = DailySummaries.get_aggregate('outTemp', daySpan, 'avg', db_manager) # Make sure it's valid before including it in the aggregation: if Tavg_t is not None and Tavg_t[0] is not None: if obs_type == 'heatdeg': total += weewx.wxformulas.heating_degrees(Tavg_t[0], heatbase_t[0]) elif obs_type == 'cooldeg': total += weewx.wxformulas.cooling_degrees(Tavg_t[0], coolbase_t[0]) else: total += weewx.wxformulas.cooling_degrees(Tavg_t[0], growbase_t[0]) count += 1 if aggregate_type == 'sum': value = total else: value = total / count if count else None # Look up the unit type and group of the result: t, g = weewx.units.getStandardUnitType(db_manager.std_unit_system, obs_type, aggregate_type) # 
Return as a value tuple return weewx.units.ValueTuple(value, t, g) # ############################# WindVec extensions ######################################### class WindVec(XType): """Extensions for calculating special observation types 'windvec' and 'windgustvec'. It provides functions for calculating series, and for calculating aggregates. """ windvec_types = { 'windvec': ('windSpeed', 'windDir'), 'windgustvec': ('windGust', 'windGustDir') } agg_sql_dict = { 'count': "SELECT COUNT(dateTime), usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL)", 'first': "SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL " "ORDER BY dateTime ASC LIMIT 1", 'last': "SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL " "ORDER BY dateTime DESC LIMIT 1", 'min': "SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL " "ORDER BY %(mag)s ASC LIMIT 1;", 'max': "SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s " "WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND %(mag)s IS NOT NULL " "ORDER BY %(mag)s DESC LIMIT 1;", } # for types 'avg', 'sum' complex_sql_wind = 'SELECT %(mag)s, %(dir)s, usUnits FROM %(table_name)s WHERE dateTime > ? ' \ 'AND dateTime <= ?' @staticmethod def get_series(obs_type, timespan, db_manager, aggregate_type=None, aggregate_interval=None): """Get a series, possibly with aggregation, for special 'wind vector' types. These are typically used for the wind vector plots. """ # Check to see if the requested type is not 'windvec' or 'windgustvec' if obs_type not in WindVec.windvec_types: # The type is not one of the extended wind types. We can't handle it. raise weewx.UnknownType(obs_type) # It is an extended wind type. 
Prepare the lists that will hold the # final results. start_vec = list() stop_vec = list() data_vec = list() # Is aggregation requested? if aggregate_type: # Yes. Just use the regular series function. When it comes time to do the aggregation, # the specialized function WindVec.get_aggregate() (defined below), will be used. return ArchiveTable.get_series(obs_type, timespan, db_manager, aggregate_type, aggregate_interval) else: # No aggregation desired. However, we have will have to assemble the wind vector from # its flattened types. This SQL select string will select the proper wind types sql_str = 'SELECT dateTime, %s, %s, usUnits, `interval` FROM %s ' \ 'WHERE dateTime >= ? AND dateTime <= ?' \ % (WindVec.windvec_types[obs_type][0], WindVec.windvec_types[obs_type][1], db_manager.table_name) std_unit_system = None for record in db_manager.genSql(sql_str, timespan): ts, magnitude, direction, unit_system, interval = record if std_unit_system: if std_unit_system != unit_system: raise weewx.UnsupportedFeature( "Unit type cannot change within a time interval.") else: std_unit_system = unit_system value = weeutil.weeutil.to_complex(magnitude, direction) start_vec.append(ts - interval * 60) stop_vec.append(ts) data_vec.append(value) unit, unit_group = weewx.units.getStandardUnitType(std_unit_system, obs_type, aggregate_type) return (ValueTuple(start_vec, 'unix_epoch', 'group_time'), ValueTuple(stop_vec, 'unix_epoch', 'group_time'), ValueTuple(data_vec, unit, unit_group)) @staticmethod def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict): """Returns an aggregation of a wind vector type over a timespan by using the main archive table. obs_type: The type over which aggregation is to be done. For this function, it must be 'windvec' or 'windgustvec'. Anything else will cause weewx.UnknownType to be raised. timespan: An instance of weeutil.Timespan with the time period over which aggregation is to be done. 
aggregate_type: The type of aggregation to be done. For this function, must be 'avg', 'sum', 'count', 'first', 'last', 'min', or 'max'. Anything else will cause weewx.UnknownAggregation to be raised. db_manager: An instance of weewx.manager.Manager or subclass. option_dict: Not used in this version. returns: A ValueTuple containing the result. Note that the value contained in the ValueTuple will be a complex number. """ if obs_type not in WindVec.windvec_types: raise weewx.UnknownType(obs_type) aggregate_type = aggregate_type.lower() # Raise exception if we don't know about this type of aggregation if aggregate_type not in ['avg', 'sum'] + list(WindVec.agg_sql_dict.keys()): raise weewx.UnknownAggregation(aggregate_type) # Form the interpolation dictionary interpolation_dict = { 'dir': WindVec.windvec_types[obs_type][1], 'mag': WindVec.windvec_types[obs_type][0], 'start': timespan.start, 'stop': timespan.stop, 'table_name': db_manager.table_name } if aggregate_type in WindVec.agg_sql_dict: # For these types (e.g., first, last, etc.), we can do the aggregation in a SELECT # statement. select_stmt = WindVec.agg_sql_dict[aggregate_type] % interpolation_dict row = db_manager.getSql(select_stmt) if row: if aggregate_type == 'count': value, std_unit_system = row else: magnitude, direction, std_unit_system = row value = weeutil.weeutil.to_complex(magnitude, direction) else: std_unit_system = db_manager.std_unit_system value = None else: # The result is more complex, requiring vector arithmetic. 
We will have to do it # in Python std_unit_system = None xsum = ysum = 0.0 count = 0 select_stmt = WindVec.complex_sql_wind % interpolation_dict for rec in db_manager.genSql(select_stmt, timespan): # Unpack the record mag, direction, unit_system = rec # Ignore rows where magnitude is NULL if mag is None: continue # A good direction is necessary unless the mag is zero: if mag == 0.0 or direction is not None: if std_unit_system: if std_unit_system != unit_system: raise weewx.UnsupportedFeature( "Unit type cannot change within a time interval.") else: std_unit_system = unit_system # An undefined direction is OK (and expected) if the magnitude # is zero. But, in that case, it doesn't contribute to the sums either. if direction is None: # Sanity check if weewx.debug: assert (mag == 0.0) else: xsum += mag * math.cos(math.radians(90.0 - direction)) ysum += mag * math.sin(math.radians(90.0 - direction)) count += 1 # We've gone through the whole interval. Were there any good data? if count: # Form the requested aggregation: if aggregate_type == 'sum': value = complex(xsum, ysum) else: # Must be 'avg' value = complex(xsum, ysum) / count else: value = None # Look up the unit type and group of this combination of observation type and aggregation: t, g = weewx.units.getStandardUnitType(std_unit_system, obs_type, aggregate_type) # Form the ValueTuple and return it: return weewx.units.ValueTuple(value, t, g) class WindVecDaily(XType): """Extension for calculating the average windvec, using the daily summaries.""" @staticmethod def get_aggregate(obs_type, timespan, aggregate_type, db_manager, **option_dict): """Optimization for calculating 'avg' aggregations for type 'windvec'. The timespan must be on a daily boundary.""" # We can only do observation type 'windvec' if obs_type != 'windvec': # We can't handle it. 
raise weewx.UnknownType(obs_type) # We can only do 'avg'' if aggregate_type != 'avg': raise weewx.UnknownAggregation(aggregate_type) # We cannot use the day summaries if the starting and ending times of the aggregation # interval are not on midnight boundaries, and are not the first or last records in the # database. if not (isStartOfDay(timespan.start) or timespan.start == db_manager.first_timestamp) \ or not (isStartOfDay(timespan.stop) or timespan.stop == db_manager.last_timestamp): raise weewx.UnknownAggregation(aggregate_type) sql = 'SELECT SUM(xsum), SUM(ysum), SUM(dirsumtime) ' \ 'FROM %s_day_wind WHERE dateTime>=? AND dateTime<?;' % db_manager.table_name row = db_manager.getSql(sql, timespan) if not row or None in row or not row[2]: # If no row was returned, or if it contains any nulls (meaning that not # all required data was available to calculate the requested aggregate), # then set the resulting value to None. value = None else: value = complex(row[0], row[1]) / row[2] # Look up the unit type and group of the result: t, g = weewx.units.getStandardUnitType(db_manager.std_unit_system, obs_type, aggregate_type) # Return as a value tuple return weewx.units.ValueTuple(value, t, g) class XTypeTable(XType): """Calculate a series for an xtype. An xtype may not necessarily be in the database, so this version calculates it on the fly. Note: this version only works if no aggregation has been requested.""" @staticmethod def get_series(obs_type, timespan, db_manager, aggregate_type=None, aggregate_interval=None): """Get a series of an xtype, by using the main archive table. Works only for no aggregation. """ start_vec = list() stop_vec = list() data_vec = list() if aggregate_type: # This version does not know how to do aggregations, although this could be # added in the future. raise weewx.UnknownAggregation(aggregate_type) else: # No aggregation std_unit_system = None # Hit the database. 
for record in db_manager.genBatchRecords(*timespan): if std_unit_system: if std_unit_system != record['usUnits']: raise weewx.UnsupportedFeature("Unit system cannot change " "within a series.") else: std_unit_system = record['usUnits'] # Given a record, use the xtypes system to calculate a value: value = get_scalar(obs_type, record, db_manager) start_vec.append(record['dateTime']- record['interval'] * 60) stop_vec.append(record['dateTime']) data_vec.append(value[0]) unit, unit_group = weewx.units.getStandardUnitType(std_unit_system, obs_type) return (ValueTuple(start_vec, 'unix_epoch', 'group_time'), ValueTuple(stop_vec, 'unix_epoch', 'group_time'), ValueTuple(data_vec, unit, unit_group)) # Add instantiated versions to the extension list. Order matters. We want the highly-specialized # versions first, because they might offer optimizations. xtypes.append(WindVecDaily()) xtypes.append(WindVec()) xtypes.append(AggregateHeatCool()) xtypes.append(DailySummaries()) xtypes.append(ArchiveTable()) xtypes.append(XTypeTable())
en
0.83212
# # Copyright (c) 2019-2020 <NAME> <<EMAIL>> # # See the file LICENSE.txt for your full rights. # User-defined extensions to the WeeWX type system # A list holding the type extensions. Each entry should be a subclass of XType, defined below. Base class for extensions to the WeeWX type system. Calculate a scalar. Specializing versions should raise... - an exception of type `weewx.UnknownType`, if the type `obs_type` is unknown to the function. - an exception of type `weewx.CannotCalculate` if the type is known to the function, but all the information necessary to calculate the type is not there. Calculate a series, possibly with aggregation. Specializing versions should raise... - an exception of type `weewx.UnknownType`, if the type `obs_type` is unknown to the function. - an exception of type `weewx.CannotCalculate` if the type is known to the function, but all the information necessary to calculate the series is not there. Calculate an aggregation. Specializing versions should raise... - an exception of type `weewx.UnknownType`, if the type `obs_type` is unknown to the function. - an exception of type `weewx.UnknownAggregation` if the aggregation type `aggregate_type` is unknown to the function. - an exception of type `weewx.CannotCalculate` if the type is known to the function, but all the information necessary to calculate the type is not there. Opportunity to do any clean up. # ##################### Retrieval functions ########################### Return a scalar value # Search the list, looking for a get_scalar() method that does not raise an exception # Try this function. It will raise an exception if it does not know about the type. # This function does not know about the type. Move on to the next one. # None of the functions worked. Return a series (aka vector) of, possibly aggregated, values. # Search the list, looking for a get_series() method that does not raise an exception # Try this function. 
It will raise an exception if it does not know about the type. # This function does not know about the type. Move on to the next one. # None of the functions worked. Calculate an aggregation over a timespan # Search the list, looking for a get_aggregate() method that does not raise an exception # Try this function. It will raise an exception if it doesn't know about the type of # aggregation. # # ######################## Class ArchiveTable ############################## # Calculate types and aggregates directly from the archive table Get a series, possibly with aggregation, from the main archive database. The general strategy is that if aggregation is asked for, chop the series up into separate chunks, calculating the aggregate for each chunk. Then assemble the results. If no aggregation is called for, just return the data directly out of the database. # With aggregation # Get the aggregate as a ValueTuple # It's OK if the unit is unknown (=None). # No aggregation # Hit the database. It's possible the type is not in the database, so be prepared # to catch a NoColumnError: # Unpack the record # The sql type doesn't exist. Convert to an UnknownType error # Set of SQL statements to be used for calculating aggregates from the main archive table. Returns an aggregation of an observation type over a given time period, using the main archive table. obs_type: The type over which aggregation is to be done (e.g., 'barometer', 'outTemp', 'rain', ...) timespan: An instance of weeutil.Timespan with the time period over which aggregation is to be done. aggregate_type: The type of aggregation to be done. db_manager: An instance of weewx.manager.Manager or subclass. option_dict: Not used in this version. returns: A ValueTuple containing the result. # Look up the unit type and group of this combination of observation type and aggregation: # Time derivatives have special rules. For example, the time derivative of watt-hours is # watts, scaled by the number of seconds in an hour. 
The unit group also changes to # group_power. # Form the ValueTuple and return it: # # ######################## Class DailySummaries ############################## # Calculate from the daily summaries. # Set of SQL statements to be used for calculating aggregates from the daily summaries. Returns an aggregation of a statistical type for a given time period, by using the daily summaries. obs_type: The type over which aggregation is to be done (e.g., 'barometer', 'outTemp', 'rain', ...) timespan: An instance of weeutil.Timespan with the time period over which aggregation is to be done. aggregate_type: The type of aggregation to be done. db_manager: An instance of weewx.manager.Manager or subclass. option_dict: Not used in this version. returns: A ValueTuple containing the result. # Check to see if this is a valid daily summary type: # Raise exception if we don't know about this type of aggregation # We cannot use the day summaries if the starting and ending times of the aggregation # interval are not on midnight boundaries, and are not the first or last records in the # database. # The following is for backwards compatibility when ValueTuples had # just two members. This hack avoids breaking old skins. # Form the interpolation dictionary # Run the query against the database: # Each aggregation type requires a slightly different calculation. # If no row was returned, or if it contains any nulls (meaning that not # all required data was available to calculate the requested aggregate), # then set the resulting value to None. # These aggregates are passed through 'as is'. # These aggregates are always integers: # Unknown aggregation. Should not have gotten this far... # Look up the unit type and group of this combination of observation type and aggregation: # Form the ValueTuple and return it: # # ######################## Class AggregateHeatCool ############################## # Calculate heating and cooling degree-days. 
# Default base temperature and unit type for heating and cooling degree days, # as a value tuple Returns heating and cooling degree days over a time period. obs_type: The type over which aggregation is to be done. Must be one of 'heatdeg', 'cooldeg', or 'growdeg'. timespan: An instance of weeutil.Timespan with the time period over which aggregation is to be done. aggregate_type: The type of aggregation to be done. Must be 'avg' or 'sum'. db_manager: An instance of weewx.manager.Manager or subclass. option_dict: Not used in this version. returns: A ValueTuple containing the result. # Check to see whether heating or cooling degree days are being asked for: # Only summation (total) or average heating or cooling degree days is supported: # Get the base for heating and cooling degree-days # Convert to a ValueTuple in the same unit system as the database # Get the average temperature for the day as a value tuple: # Make sure it's valid before including it in the aggregation: # Look up the unit type and group of the result: # Return as a value tuple # ############################# WindVec extensions ######################################### Extensions for calculating special observation types 'windvec' and 'windgustvec'. It provides functions for calculating series, and for calculating aggregates. # for types 'avg', 'sum' Get a series, possibly with aggregation, for special 'wind vector' types. These are typically used for the wind vector plots. # Check to see if the requested type is not 'windvec' or 'windgustvec' # The type is not one of the extended wind types. We can't handle it. # It is an extended wind type. Prepare the lists that will hold the # final results. # Is aggregation requested? # Yes. Just use the regular series function. When it comes time to do the aggregation, # the specialized function WindVec.get_aggregate() (defined below), will be used. # No aggregation desired. However, we have will have to assemble the wind vector from # its flattened types. 
This SQL select string will select the proper wind types Returns an aggregation of a wind vector type over a timespan by using the main archive table. obs_type: The type over which aggregation is to be done. For this function, it must be 'windvec' or 'windgustvec'. Anything else will cause weewx.UnknownType to be raised. timespan: An instance of weeutil.Timespan with the time period over which aggregation is to be done. aggregate_type: The type of aggregation to be done. For this function, must be 'avg', 'sum', 'count', 'first', 'last', 'min', or 'max'. Anything else will cause weewx.UnknownAggregation to be raised. db_manager: An instance of weewx.manager.Manager or subclass. option_dict: Not used in this version. returns: A ValueTuple containing the result. Note that the value contained in the ValueTuple will be a complex number. # Raise exception if we don't know about this type of aggregation # Form the interpolation dictionary # For these types (e.g., first, last, etc.), we can do the aggregation in a SELECT # statement. # The result is more complex, requiring vector arithmetic. We will have to do it # in Python # Unpack the record # Ignore rows where magnitude is NULL # A good direction is necessary unless the mag is zero: # An undefined direction is OK (and expected) if the magnitude # is zero. But, in that case, it doesn't contribute to the sums either. # Sanity check # We've gone through the whole interval. Were there any good data? # Form the requested aggregation: # Must be 'avg' # Look up the unit type and group of this combination of observation type and aggregation: # Form the ValueTuple and return it: Extension for calculating the average windvec, using the daily summaries. Optimization for calculating 'avg' aggregations for type 'windvec'. The timespan must be on a daily boundary. # We can only do observation type 'windvec' # We can't handle it. 
# We can only do 'avg'' # We cannot use the day summaries if the starting and ending times of the aggregation # interval are not on midnight boundaries, and are not the first or last records in the # database. # If no row was returned, or if it contains any nulls (meaning that not # all required data was available to calculate the requested aggregate), # then set the resulting value to None. # Look up the unit type and group of the result: # Return as a value tuple Calculate a series for an xtype. An xtype may not necessarily be in the database, so this version calculates it on the fly. Note: this version only works if no aggregation has been requested. Get a series of an xtype, by using the main archive table. Works only for no aggregation. # This version does not know how to do aggregations, although this could be # added in the future. # No aggregation # Hit the database. # Given a record, use the xtypes system to calculate a value: # Add instantiated versions to the extension list. Order matters. We want the highly-specialized # versions first, because they might offer optimizations.
2.474832
2
a2ml/cmdl/cmdl.py
gitter-badger/a2ml
0
6629839
<filename>a2ml/cmdl/cmdl.py import sys import click from a2ml.api.utils.context import CONTEXT_SETTINGS from a2ml.api.utils.context import pass_context COMMANDS = [ 'auth', 'new', 'import', 'train', 'evaluate', 'deploy', 'predict', 'review', 'project', 'dataset', 'experiment', 'model', 'server', 'worker' ] class A2mlCli(click.MultiCommand): def list_commands(self, ctx): return COMMANDS def get_command(self, ctx, name): try: if sys.version_info[0] == 2: name = name.encode('ascii', 'replace') mod = __import__('a2ml.cmdl.commands.cmd_' + name, None, None, ['cli']) except ImportError: import traceback traceback.print_exc() return return mod.cmdl @click.command(cls=A2mlCli, context_settings=CONTEXT_SETTINGS) @pass_context def cmdl(ctx): """A2ML command line interface."""
<filename>a2ml/cmdl/cmdl.py import sys import click from a2ml.api.utils.context import CONTEXT_SETTINGS from a2ml.api.utils.context import pass_context COMMANDS = [ 'auth', 'new', 'import', 'train', 'evaluate', 'deploy', 'predict', 'review', 'project', 'dataset', 'experiment', 'model', 'server', 'worker' ] class A2mlCli(click.MultiCommand): def list_commands(self, ctx): return COMMANDS def get_command(self, ctx, name): try: if sys.version_info[0] == 2: name = name.encode('ascii', 'replace') mod = __import__('a2ml.cmdl.commands.cmd_' + name, None, None, ['cli']) except ImportError: import traceback traceback.print_exc() return return mod.cmdl @click.command(cls=A2mlCli, context_settings=CONTEXT_SETTINGS) @pass_context def cmdl(ctx): """A2ML command line interface."""
en
0.741619
A2ML command line interface.
2.3993
2
libcc/gets.py
MICLab-Unicamp/inCCsight
2
6629840
<reponame>MICLab-Unicamp/inCCsight # coding: utf-8 def getTheCC(segmentation): import numpy as np from skimage.measure import label, regionprops labels = label(segmentation, neighbors=4) regions = regionprops(labels) theCC = [] maxwidth = 0 i = 1 ymed = None xmed = None # background is labeled as 0 for props in regions[1:]: minr, minc, maxr, maxc = props.bbox dx = maxc-minc dy = maxr-minr if dx > maxwidth: maxwidth = dx if maxr < 60: theCC = labels == i+1 ymed = maxr-dy/2 xmed = maxc-dx/2 i=i+1 return theCC, ymed, xmed def getCentralPoint(kmeans, k, kpoints): import numpy as np # kmeans = result of cluster.Kmeans().fit_predict() kcenters = [] for i in range(0,k): # Get min and max idx from same label min_idx = np.min(np.where(kmeans == i)) max_idx = np.max(np.where(kmeans == i)) # Middle term idx mid_idx = min_idx + (max_idx-min_idx)/2 # Get value using idx xk = int(round(kpoints[mid_idx][0])) yk = int(round(kpoints[mid_idx][1])) kcenters.append([xk,yk]) return kcenters def getGroupPoints(kmeans, k, offset, kpoints): import numpy as np # kmeans = result of cluster.Kmeans().fit_predict() # offset = how many points in the borders will be ignored kcenters = [] for i in range(0, k): # Get min and max idx from same label min_idx = np.min(np.where(kmeans == i)) max_idx = np.max(np.where(kmeans == i)) for j in range(min_idx + offset, max_idx - offset): # Get value using idx xk = kpoints[j][0] yk = kpoints[j][1] kcenters.append([i,xk,yk]) return kcenters def getScalars(segm, wFA, wMD, wRD, wAD): import numpy as np # Total value meanFA = np.mean(wFA[segm==True]) stdFA = np.std(wFA[segm==True]) meanMD = np.mean(wMD[segm==True]) stdMD = np.std(wMD[segm==True]) meanRD = np.mean(wRD[segm==True]) stdRD = np.std(wRD[segm==True]) meanAD = np.mean(wAD[segm==True]) stdAD = np.std(wAD[segm==True]) return meanFA, stdFA, meanMD, stdMD, meanRD, stdRD, meanAD, stdAD def getFAmidline(segm, wFA_ms, n_points=200): import numpy as np from libcc import points # Get CC's midline px, py = 
points(segm, n_points+1) fa_line = [] for aux in range(0, n_points): try: x = int(round(px[aux])) y = int(round(py[aux])) fa = wFA_ms[y,x] except: x = int(np.floor(px[aux])) y = int(np.floor(py[aux])) fa = wFA_ms[y,x] fa_line.append(fa) return fa_line def getData(parcel, FA, MD, RD, AD): import numpy as np data = {} # Initialize for region in ['P1', 'P2', 'P3', 'P4', 'P5']: data[region] = {} # Parcel values for i in range(2,7): data['P'+str(i-1)]['FA'] = np.mean(FA[parcel==i]) data['P'+str(i-1)]['FA StdDev'] = np.std(FA[parcel==i]) data['P'+str(i-1)]['MD'] = np.mean(MD[parcel==i]) data['P'+str(i-1)]['MD StdDev'] = np.std(MD[parcel==i]) data['P'+str(i-1)]['RD'] = np.mean(RD[parcel==i]) data['P'+str(i-1)]['RD StdDev'] = np.std(RD[parcel==i]) data['P'+str(i-1)]['AD'] = np.mean(AD[parcel==i]) data['P'+str(i-1)]['AD StdDev'] = np.std(AD[parcel==i]) return data def getLargestConnectedComponent(segmentation): from skimage.measure import label, regionprops import numpy as np labels = label(segmentation) assert(labels.max() != 0 ) # assume at least 1 CC cc = labels == np.argmax(np.bincount(labels.flat)[1:])+1 return cc
# coding: utf-8 def getTheCC(segmentation): import numpy as np from skimage.measure import label, regionprops labels = label(segmentation, neighbors=4) regions = regionprops(labels) theCC = [] maxwidth = 0 i = 1 ymed = None xmed = None # background is labeled as 0 for props in regions[1:]: minr, minc, maxr, maxc = props.bbox dx = maxc-minc dy = maxr-minr if dx > maxwidth: maxwidth = dx if maxr < 60: theCC = labels == i+1 ymed = maxr-dy/2 xmed = maxc-dx/2 i=i+1 return theCC, ymed, xmed def getCentralPoint(kmeans, k, kpoints): import numpy as np # kmeans = result of cluster.Kmeans().fit_predict() kcenters = [] for i in range(0,k): # Get min and max idx from same label min_idx = np.min(np.where(kmeans == i)) max_idx = np.max(np.where(kmeans == i)) # Middle term idx mid_idx = min_idx + (max_idx-min_idx)/2 # Get value using idx xk = int(round(kpoints[mid_idx][0])) yk = int(round(kpoints[mid_idx][1])) kcenters.append([xk,yk]) return kcenters def getGroupPoints(kmeans, k, offset, kpoints): import numpy as np # kmeans = result of cluster.Kmeans().fit_predict() # offset = how many points in the borders will be ignored kcenters = [] for i in range(0, k): # Get min and max idx from same label min_idx = np.min(np.where(kmeans == i)) max_idx = np.max(np.where(kmeans == i)) for j in range(min_idx + offset, max_idx - offset): # Get value using idx xk = kpoints[j][0] yk = kpoints[j][1] kcenters.append([i,xk,yk]) return kcenters def getScalars(segm, wFA, wMD, wRD, wAD): import numpy as np # Total value meanFA = np.mean(wFA[segm==True]) stdFA = np.std(wFA[segm==True]) meanMD = np.mean(wMD[segm==True]) stdMD = np.std(wMD[segm==True]) meanRD = np.mean(wRD[segm==True]) stdRD = np.std(wRD[segm==True]) meanAD = np.mean(wAD[segm==True]) stdAD = np.std(wAD[segm==True]) return meanFA, stdFA, meanMD, stdMD, meanRD, stdRD, meanAD, stdAD def getFAmidline(segm, wFA_ms, n_points=200): import numpy as np from libcc import points # Get CC's midline px, py = points(segm, n_points+1) fa_line = [] 
for aux in range(0, n_points): try: x = int(round(px[aux])) y = int(round(py[aux])) fa = wFA_ms[y,x] except: x = int(np.floor(px[aux])) y = int(np.floor(py[aux])) fa = wFA_ms[y,x] fa_line.append(fa) return fa_line def getData(parcel, FA, MD, RD, AD): import numpy as np data = {} # Initialize for region in ['P1', 'P2', 'P3', 'P4', 'P5']: data[region] = {} # Parcel values for i in range(2,7): data['P'+str(i-1)]['FA'] = np.mean(FA[parcel==i]) data['P'+str(i-1)]['FA StdDev'] = np.std(FA[parcel==i]) data['P'+str(i-1)]['MD'] = np.mean(MD[parcel==i]) data['P'+str(i-1)]['MD StdDev'] = np.std(MD[parcel==i]) data['P'+str(i-1)]['RD'] = np.mean(RD[parcel==i]) data['P'+str(i-1)]['RD StdDev'] = np.std(RD[parcel==i]) data['P'+str(i-1)]['AD'] = np.mean(AD[parcel==i]) data['P'+str(i-1)]['AD StdDev'] = np.std(AD[parcel==i]) return data def getLargestConnectedComponent(segmentation): from skimage.measure import label, regionprops import numpy as np labels = label(segmentation) assert(labels.max() != 0 ) # assume at least 1 CC cc = labels == np.argmax(np.bincount(labels.flat)[1:])+1 return cc
en
0.670278
# coding: utf-8 # background is labeled as 0 # kmeans = result of cluster.Kmeans().fit_predict() # Get min and max idx from same label # Middle term idx # Get value using idx # kmeans = result of cluster.Kmeans().fit_predict() # offset = how many points in the borders will be ignored # Get min and max idx from same label # Get value using idx # Total value # Get CC's midline # Initialize # Parcel values # assume at least 1 CC
2.468541
2
output_video.py
stfkolev/Solaris-IPG
1
6629841
import cv2
import numpy as np
import os
import argparse
# parser = argparse.ArgumentParser(description='arguments')
# parser.add_argument('--target', action='store_true', help='plot paths')
# args = parser.parse_args()
from os.path import isfile, join

# ---- Parameters you need to change ------------------------------------
fps = 5.0        # should be 1/dt where dt is your sampling rate
target = 'radar' # type of figure to plot: lidar, radar, or dynamic
file_num = 87    # total number of samples; the largest number in your
                 # lidar_step#.dat files
# ---- End of parameter definitions -------------------------------------

# Read every frame into memory; `size` ends up as the last frame's size,
# which cv2.VideoWriter needs up front.
frame_array = []
size = None
for i in range(file_num):
    filename = 'output_img/' + target + str(i + 1) + '.png'
    # Read each file.
    img = cv2.imread(filename)
    height, width, layers = img.shape
    size = (width, height)
    print(filename)
    # Insert the frame into the image array.
    frame_array.append(img)

# cv2.cv was removed in OpenCV 3; cv2.VideoWriter_fourcc is the supported
# replacement for cv2.cv.CV_FOURCC.
out = cv2.VideoWriter('video/' + target + '.avi',
                      cv2.VideoWriter_fourcc(*'XVID'), fps, size)

for frame in frame_array:
    # Write each frame to the video file.
    out.write(frame)
out.release()
import cv2 import numpy as np import os import argparse # parser = argparse.ArgumentParser(description='arguments') # parser.add_argument('--target', action='store_true', help='plot paths') # args = parser.parse_args() from os.path import isfile, join # Here is all the parameters you need to change fps = 5.0 # shuould be 1/dt where dt is your sampling rate target = 'radar' # should be the type of figure you want to plot # either: lidar, radar, or dynamic file_num = 87 # the total number of samples you have. # should be the largest number in your lidar_step#.dat # End of all the parameters definition frame_array = [] for i in range(file_num): filename='output_img/' + target + str(i+1) + '.png' #reading each files img = cv2.imread(filename) height, width, layers = img.shape size = (width,height) print(filename) #inserting the frames into an image array frame_array.append(img) out = cv2.VideoWriter('video/' + target + '.avi',cv2.cv.CV_FOURCC(*'XVID'), fps, size) for i in range(len(frame_array)): # writing to a image array out.write(frame_array[i]) out.release()
en
0.445768
# parser = argparse.ArgumentParser(description='arguments') # parser.add_argument('--target', action='store_true', help='plot paths') # args = parser.parse_args() # Here is all the parameters you need to change # shuould be 1/dt where dt is your sampling rate # should be the type of figure you want to plot # either: lidar, radar, or dynamic # the total number of samples you have. # should be the largest number in your lidar_step#.dat # End of all the parameters definition #reading each files #inserting the frames into an image array # writing to a image array
2.865496
3
setup.py
tooxo/distest
0
6629842
#!/usr/bin/env python3
# encoding: utf-8
"""Packaging configuration for the distest package."""

from setuptools import setup

# Reuse the README as the PyPI long description.
with open("README.md", "r") as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name="distest",
    version="0.3.1",
    description="Automate the testing of discord bots... With discord bots!",
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url="http://github.com/JakeCover/distest",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    packages=["distest"],
    install_requires=["discord.py>=1.0.0"],
    zip_safe=False,
    classifiers=["Topic :: Software Development :: Testing :: Unit"],
    keywords=[
        "Discord",
        "Discord.py",
        "Unit Test",
        "Test",
        "Distest",
        "Discord Testing",
    ],
)
#!/usr/bin/env python3 # encoding: utf-8 from setuptools import setup with open("README.md", "r") as fh: long_description = fh.read() setup( name="distest", version="0.3.1", description="Automate the testing of discord bots... With discord bots!", long_description=long_description, long_description_content_type="text/markdown", url="http://github.com/JakeCover/distest", author="<NAME>", author_email="<EMAIL>", license="MIT", packages=["distest"], install_requires=["discord.py>=1.0.0"], zip_safe=False, classifiers=["Topic :: Software Development :: Testing :: Unit"], keywords=[ "Discord", "Discord.py", "Unit Test", "Test", "Distest", "Discord Testing", ], )
en
0.303075
#!/usr/bin/env python3 # encoding: utf-8
1.268732
1
examples/attach/boshclient.py
McPo/strophejs
1,047
6629843
# NOTE: Python 2 code (httplib, urlparse, print statement); depends on
# punjab and Twisted.
import sys, os
import httplib, urllib
import random, binascii
from urlparse import urlparse
from punjab.httpb import HttpbParse

from twisted.words.xish import domish
from twisted.words.protocols.jabber import jid

# XMPP stream-feature namespaces used during session setup.
TLS_XMLNS = 'urn:ietf:params:xml:ns:xmpp-tls'
SASL_XMLNS = 'urn:ietf:params:xml:ns:xmpp-sasl'
BIND_XMLNS = 'urn:ietf:params:xml:ns:xmpp-bind'
SESSION_XMLNS = 'urn:ietf:params:xml:ns:xmpp-session'


class BOSHClient:
    """Minimal XMPP-over-BOSH client: opens an HTTP binding session,
    authenticates with SASL PLAIN, then binds a resource and starts a
    session.  Success is reflected in self.logged_in."""

    def __init__(self, jabberid, password, bosh_service):
        # rid: BOSH request id, randomized per RFC guidance and incremented
        # on every request.
        self.rid = random.randint(0, 10000000)
        self.jabberid = jid.internJID(jabberid)
        self.password = password
        self.authid = None
        self.sid = None          # BOSH session id, set after session create
        self.logged_in = False
        self.headers = {"Content-type": "text/xml",
                        "Accept": "text/xml"}
        self.bosh_service = urlparse(bosh_service)

    def buildBody(self, child=None):
        """Build a BOSH <body/> wrapper for the current session, bumping
        the request id, and optionally embedding `child`."""
        body = domish.Element(("http://jabber.org/protocol/httpbind",
                               "body"))
        body['content'] = 'text/xml; charset=utf-8'
        self.rid = self.rid + 1
        body['rid'] = str(self.rid)
        body['sid'] = str(self.sid)
        body['xml:lang'] = 'en'
        if child is not None:
            body.addChild(child)
        return body

    def sendBody(self, body):
        """POST the body to the BOSH service and return the parsed
        (body element, child elements) pair from HttpbParse."""
        parser = HttpbParse(True)
        # start new session
        conn = httplib.HTTPConnection(self.bosh_service.netloc)
        conn.request("POST", self.bosh_service.path, body.toXml(),
                     self.headers)
        response = conn.getresponse()
        data = ''
        if response.status == 200:
            data = response.read()
        conn.close()
        return parser.parse(data)

    def startSessionAndAuth(self, hold='1', wait='70'):
        """Create the BOSH session, authenticate (SASL PLAIN), bind a
        resource and establish the XMPP session; sets self.logged_in."""
        # Create a session
        # create body
        body = domish.Element(("http://jabber.org/protocol/httpbind",
                               "body"))
        body['content'] = 'text/xml; charset=utf-8'
        body['hold'] = hold
        body['rid'] = str(self.rid)
        body['to'] = self.jabberid.host
        body['wait'] = wait
        body['window'] = '5'
        body['xml:lang'] = 'en'

        retb, elems = self.sendBody(body)
        # A string return value indicates a parse/transport failure.
        if type(retb) != str and retb.hasAttribute('authid') and \
           retb.hasAttribute('sid'):
            self.authid = retb['authid']
            self.sid = retb['sid']

            # go ahead and auth
            auth = domish.Element((SASL_XMLNS, 'auth'))
            auth['mechanism'] = 'PLAIN'

            # TODO: add authzid
            if auth['mechanism'] == 'PLAIN':
                # SASL PLAIN payload: \0user\0password, base64-encoded.
                auth_str = ""
                auth_str += "\000"
                auth_str += self.jabberid.user.encode('utf-8')
                auth_str += "\000"
                try:
                    auth_str += self.password.encode('utf-8').strip()
                except UnicodeDecodeError:
                    # Password bytes not valid UTF-8; assume latin-1.
                    auth_str += self.password.decode('latin1') \
                        .encode('utf-8').strip()
                auth.addContent(binascii.b2a_base64(auth_str))

            retb, elems = self.sendBody(self.buildBody(auth))
            if len(elems) == 0:
                # poll for data
                retb, elems = self.sendBody(self.buildBody())
            if len(elems) > 0:
                if elems[0].name == 'success':
                    # Auth succeeded; restart and look for the bind feature.
                    retb, elems = self.sendBody(self.buildBody())
                    has_bind = False
                    for child in elems[0].children:
                        if child.name == 'bind':
                            has_bind = True
                            break
                    if has_bind:
                        # Bind the resource from our JID (if any).
                        iq = domish.Element(('jabber:client', 'iq'))
                        iq['type'] = 'set'
                        iq.addUniqueId()
                        iq.addElement('bind')
                        iq.bind['xmlns'] = BIND_XMLNS
                        if self.jabberid.resource:
                            iq.bind.addElement('resource')
                            iq.bind.resource.addContent(
                                self.jabberid.resource)
                        retb, elems = self.sendBody(self.buildBody(iq))
                        if type(retb) != str and retb.name == 'body':
                            # send session
                            iq = domish.Element(('jabber:client', 'iq'))
                            iq['type'] = 'set'
                            iq.addUniqueId()
                            iq.addElement('session')
                            iq.session['xmlns'] = SESSION_XMLNS
                            retb, elems = self.sendBody(self.buildBody(iq))
                    # did not bind, TODO - add a retry?
                    if type(retb) != str and retb.name == 'body':
                        self.logged_in = True
            # bump up the rid, punjab already
            # received self.rid
            self.rid += 1


if __name__ == '__main__':
    # Usage: boshclient.py <jid> <password> <bosh-service-url>
    USERNAME = sys.argv[1]
    PASSWORD = sys.argv[2]
    SERVICE = sys.argv[3]

    c = BOSHClient(USERNAME, PASSWORD, SERVICE)
    c.startSessionAndAuth()

    print c.logged_in
import sys, os import httplib, urllib import random, binascii from urlparse import urlparse from punjab.httpb import HttpbParse from twisted.words.xish import domish from twisted.words.protocols.jabber import jid TLS_XMLNS = 'urn:ietf:params:xml:ns:xmpp-tls' SASL_XMLNS = 'urn:ietf:params:xml:ns:xmpp-sasl' BIND_XMLNS = 'urn:ietf:params:xml:ns:xmpp-bind' SESSION_XMLNS = 'urn:ietf:params:xml:ns:xmpp-session' class BOSHClient: def __init__(self, jabberid, password, bosh_service): self.rid = random.randint(0, 10000000) self.jabberid = jid.internJID(jabberid) self.password = password self.authid = None self.sid = None self.logged_in = False self.headers = {"Content-type": "text/xml", "Accept": "text/xml"} self.bosh_service = urlparse(bosh_service) def buildBody(self, child=None): """Build a BOSH body. """ body = domish.Element(("http://jabber.org/protocol/httpbind", "body")) body['content'] = 'text/xml; charset=utf-8' self.rid = self.rid + 1 body['rid'] = str(self.rid) body['sid'] = str(self.sid) body['xml:lang'] = 'en' if child is not None: body.addChild(child) return body def sendBody(self, body): """Send the body. 
""" parser = HttpbParse(True) # start new session conn = httplib.HTTPConnection(self.bosh_service.netloc) conn.request("POST", self.bosh_service.path, body.toXml(), self.headers) response = conn.getresponse() data = '' if response.status == 200: data = response.read() conn.close() return parser.parse(data) def startSessionAndAuth(self, hold='1', wait='70'): # Create a session # create body body = domish.Element(("http://jabber.org/protocol/httpbind", "body")) body['content'] = 'text/xml; charset=utf-8' body['hold'] = hold body['rid'] = str(self.rid) body['to'] = self.jabberid.host body['wait'] = wait body['window'] = '5' body['xml:lang'] = 'en' retb, elems = self.sendBody(body) if type(retb) != str and retb.hasAttribute('authid') and \ retb.hasAttribute('sid'): self.authid = retb['authid'] self.sid = retb['sid'] # go ahead and auth auth = domish.Element((SASL_XMLNS, 'auth')) auth['mechanism'] = 'PLAIN' # TODO: add authzid if auth['mechanism'] == 'PLAIN': auth_str = "" auth_str += "\000" auth_str += self.jabberid.user.encode('utf-8') auth_str += "\000" try: auth_str += self.password.encode('utf-8').strip() except UnicodeDecodeError: auth_str += self.password.decode('latin1') \ .encode('utf-8').strip() auth.addContent(binascii.b2a_base64(auth_str)) retb, elems = self.sendBody(self.buildBody(auth)) if len(elems) == 0: # poll for data retb, elems = self.sendBody(self.buildBody()) if len(elems) > 0: if elems[0].name == 'success': retb, elems = self.sendBody(self.buildBody()) has_bind = False for child in elems[0].children: if child.name == 'bind': has_bind = True break if has_bind: iq = domish.Element(('jabber:client', 'iq')) iq['type'] = 'set' iq.addUniqueId() iq.addElement('bind') iq.bind['xmlns'] = BIND_XMLNS if self.jabberid.resource: iq.bind.addElement('resource') iq.bind.resource.addContent( self.jabberid.resource) retb, elems = self.sendBody(self.buildBody(iq)) if type(retb) != str and retb.name == 'body': # send session iq = domish.Element(('jabber:client', 
'iq')) iq['type'] = 'set' iq.addUniqueId() iq.addElement('session') iq.session['xmlns'] = SESSION_XMLNS retb, elems = self.sendBody(self.buildBody(iq)) # did not bind, TODO - add a retry? if type(retb) != str and retb.name == 'body': self.logged_in = True # bump up the rid, punjab already # received self.rid self.rid += 1 if __name__ == '__main__': USERNAME = sys.argv[1] PASSWORD = sys.argv[2] SERVICE = sys.argv[3] c = BOSHClient(USERNAME, PASSWORD, SERVICE) c.startSessionAndAuth() print c.logged_in
en
0.757027
Build a BOSH body. Send the body. # start new session # Create a session # create body # go ahead and auth # TODO: add authzid # poll for data # send session # did not bind, TODO - add a retry? # bump up the rid, punjab already # received self.rid
2.248299
2
atlaselectrophysiology/load_data_local.py
GaelleChapuis/iblapps
0
6629844
import numpy as np
from datetime import datetime
import ibllib.atlas as atlas
from pathlib import Path
import alf.io
import glob
import json

# brain_atlas = atlas.AllenAtlas(25)


class LoadDataLocal:
    """Load and persist electrode-alignment data from a local session
    folder (instead of Alyx/FlatIron): previous alignments, channel
    coordinates, xyz picks, histology slices, and saved results."""

    def __init__(self):
        self.brain_atlas = atlas.AllenAtlas(25)  # 25-um Allen atlas
        self.folder_path = []   # session folder, set by get_info()
        self.chn_coords = []    # channel local coordinates, set by get_data()
        self.sess_path = []
        # NOTE(review): duplicate of the assignment above -- the atlas is
        # constructed twice; looks redundant, confirm before removing.
        self.brain_atlas = atlas.AllenAtlas(25)

    def get_info(self, folder_path):
        """
        Read in the local json file to see if any previous alignments exist
        """
        self.folder_path = folder_path
        return self.get_previous_alignments()

    def get_previous_alignments(self):
        """Return the list of prior alignment keys (newest first) plus
        'original'; reads prev_alignments.json when present."""
        # If previous alignment json file exists, read in previous alignments
        if Path(self.folder_path, 'prev_alignments.json').exists():
            with open(Path(self.folder_path, 'prev_alignments.json'), "r") as f:
                self.alignments = json.load(f)
                self.prev_align = []
                if self.alignments:
                    self.prev_align = [*self.alignments.keys()]
                self.prev_align = sorted(self.prev_align, reverse=True)
                self.prev_align.append('original')
        else:
            self.alignments = []
            self.prev_align = ['original']

        return self.prev_align

    def get_starting_alignment(self, idx):
        """
        Find out the starting alignmnet
        """
        # idx indexes into self.prev_align; 'original' yields (None, None).
        align = self.prev_align[idx]

        if align == 'original':
            feature = None
            track = None
        else:
            feature = np.array(self.alignments[align][0])
            track = np.array(self.alignments[align][1])

        return feature, track

    def get_data(self):
        """Load channel coordinates and session notes from the folder.

        Returns (alf_path, ephys_path, chn_depths, sess_notes).
        """
        # Define alf_path and ephys_path (a bit redundant but so it is
        # compatible with plot data)
        alf_path = self.folder_path
        ephys_path = self.folder_path

        self.chn_coords = np.load(Path(alf_path, 'channels.localCoordinates.npy'))
        chn_depths = self.chn_coords[:, 1]

        # Read in notes for this experiment see if file exists in directory
        if Path(self.folder_path, 'session_notes.txt').exists():
            with open(Path(self.folder_path, 'session_notes.txt'), "r") as f:
                sess_notes = f.read()
        else:
            sess_notes = 'No notes for this session'

        return alf_path, ephys_path, chn_depths, sess_notes

    def get_allen_csv(self):
        """Load the Allen structure tree CSV shipped with ibllib and cache
        it on self.allen."""
        allen_path = Path(Path(atlas.__file__).parent,
                          'allen_structure_tree.csv')
        self.allen = alf.io.load_file_content(allen_path)

        return self.allen

    def get_xyzpicks(self):
        """Return the user's xyz picks (metres) from xyz_picks.json."""
        # Read in local xyz_picks file
        # This file must exist, otherwise we don't know where probe was
        assert(Path(self.folder_path, 'xyz_picks.json').exists())

        with open(Path(self.folder_path, 'xyz_picks.json'), "r") as f:
            user_picks = json.load(f)

        # Stored in micrometres; convert to metres.
        xyz_picks = np.array(user_picks['xyz_picks']) / 1e6

        return xyz_picks

    def get_slice_images(self, xyz_channels):
        """Extract CCF, label and (when available) red/green histology
        slices along the channel trajectory.

        Returns a dict with the slices plus 'scale' and 'offset' for
        plotting; histology slices fall back to the CCF slice when the
        *RD.nrrd / *GR.nrrd files are missing."""
        # First see if the histology file exists before attempting to
        # connect with FlatIron and download
        path_to_rd_image = glob.glob(str(self.folder_path) + '/*RD.nrrd')
        if path_to_rd_image:
            hist_path_rd = Path(path_to_rd_image[0])
        else:
            hist_path_rd = []

        path_to_gr_image = glob.glob(str(self.folder_path) + '/*GR.nrrd')
        if path_to_gr_image:
            hist_path_gr = Path(path_to_gr_image[0])
        else:
            hist_path_gr = []

        # Voxel indices of the channel positions in atlas space.
        index = self.brain_atlas.bc.xyz2i(xyz_channels)[:, self.brain_atlas.xyz2dims]
        ccf_slice = self.brain_atlas.image[index[:, 0], :, index[:, 2]]
        ccf_slice = np.swapaxes(ccf_slice, 0, 1)

        label_slice = self.brain_atlas._label2rgb(self.brain_atlas.label[index[:, 0], :,
                                                                         index[:, 2]])
        label_slice = np.swapaxes(label_slice, 0, 1)

        width = [self.brain_atlas.bc.i2x(0), self.brain_atlas.bc.i2x(456)]
        height = [self.brain_atlas.bc.i2z(index[0, 2]),
                  self.brain_atlas.bc.i2z(index[-1, 2])]

        if hist_path_rd:
            hist_atlas_rd = atlas.AllenAtlas(hist_path=hist_path_rd)
            hist_slice_rd = hist_atlas_rd.image[index[:, 0], :, index[:, 2]]
            hist_slice_rd = np.swapaxes(hist_slice_rd, 0, 1)
        else:
            print('Could not find red histology image for this subject')
            hist_slice_rd = np.copy(ccf_slice)

        if hist_path_gr:
            hist_atlas_gr = atlas.AllenAtlas(hist_path=hist_path_gr)
            hist_slice_gr = hist_atlas_gr.image[index[:, 0], :, index[:, 2]]
            hist_slice_gr = np.swapaxes(hist_slice_gr, 0, 1)
        else:
            print('Could not find green histology image for this subject')
            hist_slice_gr = np.copy(ccf_slice)

        slice_data = {
            'hist_rd': hist_slice_rd,
            'hist_gr': hist_slice_gr,
            'ccf': ccf_slice,
            'label': label_slice,
            'scale': np.array([(width[-1] - width[0]) / ccf_slice.shape[0],
                               (height[-1] - height[0]) / ccf_slice.shape[1]]),
            'offset': np.array([width[0], height[0]])
        }

        return slice_data

    def get_region_description(self, region_idx):
        """Return (description, 'ACRONYM: name') for an Allen region id;
        requires get_allen_csv() to have been called first."""
        struct_idx = np.where(self.allen['id'] == region_idx)[0][0]
        # Haven't yet incorporated how to have region descriptions when not
        # on Alyx
        # For now always have this as blank
        description = ''
        region_lookup = self.allen['acronym'][struct_idx] + ': ' + \
            self.allen['name'][struct_idx]

        if region_lookup == 'void: void':
            region_lookup = 'root: root'

        if not description:
            description = region_lookup + '\nNo information available for this region'
        else:
            description = region_lookup + '\n' + description

        return description, region_lookup

    def upload_data(self, feature, track, xyz_channels):
        """Write channel_locations.json and append this alignment (keyed by
        timestamp) to prev_alignments.json in the session folder."""
        brain_regions = self.brain_atlas.regions.get(self.brain_atlas.get_labels
                                                     (xyz_channels))
        brain_regions['xyz'] = xyz_channels
        brain_regions['lateral'] = self.chn_coords[:, 0]
        brain_regions['axial'] = self.chn_coords[:, 1]
        # All entries must describe the same number of channels.
        assert np.unique([len(brain_regions[k]) for k in brain_regions]).size == 1
        channel_dict = self.create_channel_dict(brain_regions)
        bregma = atlas.ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'].tolist()
        origin = {'origin': {'bregma': bregma}}
        channel_dict.update(origin)
        # Save the channel locations
        with open(Path(self.folder_path, 'channel_locations.json'), "w") as f:
            json.dump(channel_dict, f, indent=2, separators=(',', ': '))
        original_json = self.alignments
        date = datetime.now().replace(microsecond=0).isoformat()
        data = {date: [feature.tolist(), track.tolist()]}
        if original_json:
            original_json.update(data)
        else:
            original_json = data
        # Save the new alignment
        with open(Path(self.folder_path, 'prev_alignments.json'), "w") as f:
            json.dump(original_json, f, indent=2, separators=(',', ': '))

    @staticmethod
    def create_channel_dict(brain_regions):
        """
        Create channel dictionary in form to write to json file
        :param brain_regions: information about location of electrode
        channels in brain atlas
        :type brain_regions: Bunch
        :return channel_dict:
        :type channel_dict: dictionary of dictionaries
        """
        channel_dict = {}
        for i in np.arange(brain_regions.id.size):
            channel = {
                # Coordinates converted from metres to micrometres.
                'x': brain_regions.xyz[i, 0] * 1e6,
                'y': brain_regions.xyz[i, 1] * 1e6,
                'z': brain_regions.xyz[i, 2] * 1e6,
                'axial': brain_regions.axial[i],
                'lateral': brain_regions.lateral[i],
                'brain_region_id': int(brain_regions.id[i]),
                'brain_region': brain_regions.acronym[i]
            }
            data = {'channel_' + str(i): channel}
            channel_dict.update(data)

        return channel_dict
import numpy as np from datetime import datetime import ibllib.atlas as atlas from pathlib import Path import alf.io import glob import json # brain_atlas = atlas.AllenAtlas(25) class LoadDataLocal: def __init__(self): self.brain_atlas = atlas.AllenAtlas(25) self.folder_path = [] self.chn_coords = [] self.sess_path = [] self.brain_atlas = atlas.AllenAtlas(25) def get_info(self, folder_path): """ Read in the local json file to see if any previous alignments exist """ self.folder_path = folder_path return self.get_previous_alignments() def get_previous_alignments(self): # If previous alignment json file exists, read in previous alignments if Path(self.folder_path, 'prev_alignments.json').exists(): with open(Path(self.folder_path, 'prev_alignments.json'), "r") as f: self.alignments = json.load(f) self.prev_align = [] if self.alignments: self.prev_align = [*self.alignments.keys()] self.prev_align = sorted(self.prev_align, reverse=True) self.prev_align.append('original') else: self.alignments = [] self.prev_align = ['original'] return self.prev_align def get_starting_alignment(self, idx): """ Find out the starting alignmnet """ align = self.prev_align[idx] if align == 'original': feature = None track = None else: feature = np.array(self.alignments[align][0]) track = np.array(self.alignments[align][1]) return feature, track def get_data(self): # Define alf_path and ephys_path (a bit redundant but so it is compatible with plot data) alf_path = self.folder_path ephys_path = self.folder_path self.chn_coords = np.load(Path(alf_path, 'channels.localCoordinates.npy')) chn_depths = self.chn_coords[:, 1] # Read in notes for this experiment see if file exists in directory if Path(self.folder_path, 'session_notes.txt').exists(): with open(Path(self.folder_path, 'session_notes.txt'), "r") as f: sess_notes = f.read() else: sess_notes = 'No notes for this session' return alf_path, ephys_path, chn_depths, sess_notes def get_allen_csv(self): allen_path = 
Path(Path(atlas.__file__).parent, 'allen_structure_tree.csv') self.allen = alf.io.load_file_content(allen_path) return self.allen def get_xyzpicks(self): # Read in local xyz_picks file # This file must exist, otherwise we don't know where probe was assert(Path(self.folder_path, 'xyz_picks.json').exists()) with open(Path(self.folder_path, 'xyz_picks.json'), "r") as f: user_picks = json.load(f) xyz_picks = np.array(user_picks['xyz_picks']) / 1e6 return xyz_picks def get_slice_images(self, xyz_channels): # First see if the histology file exists before attempting to connect with FlatIron and # download path_to_rd_image = glob.glob(str(self.folder_path) + '/*RD.nrrd') if path_to_rd_image: hist_path_rd = Path(path_to_rd_image[0]) else: hist_path_rd = [] path_to_gr_image = glob.glob(str(self.folder_path) + '/*GR.nrrd') if path_to_gr_image: hist_path_gr = Path(path_to_gr_image[0]) else: hist_path_gr = [] index = self.brain_atlas.bc.xyz2i(xyz_channels)[:, self.brain_atlas.xyz2dims] ccf_slice = self.brain_atlas.image[index[:, 0], :, index[:, 2]] ccf_slice = np.swapaxes(ccf_slice, 0, 1) label_slice = self.brain_atlas._label2rgb(self.brain_atlas.label[index[:, 0], :, index[:, 2]]) label_slice = np.swapaxes(label_slice, 0, 1) width = [self.brain_atlas.bc.i2x(0), self.brain_atlas.bc.i2x(456)] height = [self.brain_atlas.bc.i2z(index[0, 2]), self.brain_atlas.bc.i2z(index[-1, 2])] if hist_path_rd: hist_atlas_rd = atlas.AllenAtlas(hist_path=hist_path_rd) hist_slice_rd = hist_atlas_rd.image[index[:, 0], :, index[:, 2]] hist_slice_rd = np.swapaxes(hist_slice_rd, 0, 1) else: print('Could not find red histology image for this subject') hist_slice_rd = np.copy(ccf_slice) if hist_path_gr: hist_atlas_gr = atlas.AllenAtlas(hist_path=hist_path_gr) hist_slice_gr = hist_atlas_gr.image[index[:, 0], :, index[:, 2]] hist_slice_gr = np.swapaxes(hist_slice_gr, 0, 1) else: print('Could not find green histology image for this subject') hist_slice_gr = np.copy(ccf_slice) slice_data = { 'hist_rd': 
hist_slice_rd, 'hist_gr': hist_slice_gr, 'ccf': ccf_slice, 'label': label_slice, 'scale': np.array([(width[-1] - width[0]) / ccf_slice.shape[0], (height[-1] - height[0]) / ccf_slice.shape[1]]), 'offset': np.array([width[0], height[0]]) } return slice_data def get_region_description(self, region_idx): struct_idx = np.where(self.allen['id'] == region_idx)[0][0] # Haven't yet incorporated how to have region descriptions when not on Alyx # For now always have this as blank description = '' region_lookup = self.allen['acronym'][struct_idx] + ': ' + self.allen['name'][struct_idx] if region_lookup == 'void: void': region_lookup = 'root: root' if not description: description = region_lookup + '\nNo information available for this region' else: description = region_lookup + '\n' + description return description, region_lookup def upload_data(self, feature, track, xyz_channels): brain_regions = self.brain_atlas.regions.get(self.brain_atlas.get_labels (xyz_channels)) brain_regions['xyz'] = xyz_channels brain_regions['lateral'] = self.chn_coords[:, 0] brain_regions['axial'] = self.chn_coords[:, 1] assert np.unique([len(brain_regions[k]) for k in brain_regions]).size == 1 channel_dict = self.create_channel_dict(brain_regions) bregma = atlas.ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'].tolist() origin = {'origin': {'bregma': bregma}} channel_dict.update(origin) # Save the channel locations with open(Path(self.folder_path, 'channel_locations.json'), "w") as f: json.dump(channel_dict, f, indent=2, separators=(',', ': ')) original_json = self.alignments date = datetime.now().replace(microsecond=0).isoformat() data = {date: [feature.tolist(), track.tolist()]} if original_json: original_json.update(data) else: original_json = data # Save the new alignment with open(Path(self.folder_path, 'prev_alignments.json'), "w") as f: json.dump(original_json, f, indent=2, separators=(',', ': ')) @staticmethod def create_channel_dict(brain_regions): """ Create channel dictionary in form to write to 
json file :param brain_regions: information about location of electrode channels in brain atlas :type brain_regions: Bunch :return channel_dict: :type channel_dict: dictionary of dictionaries """ channel_dict = {} for i in np.arange(brain_regions.id.size): channel = { 'x': brain_regions.xyz[i, 0] * 1e6, 'y': brain_regions.xyz[i, 1] * 1e6, 'z': brain_regions.xyz[i, 2] * 1e6, 'axial': brain_regions.axial[i], 'lateral': brain_regions.lateral[i], 'brain_region_id': int(brain_regions.id[i]), 'brain_region': brain_regions.acronym[i] } data = {'channel_' + str(i): channel} channel_dict.update(data) return channel_dict
en
0.865802
# brain_atlas = atlas.AllenAtlas(25) Read in the local json file to see if any previous alignments exist # If previous alignment json file exists, read in previous alignments Find out the starting alignmnet # Define alf_path and ephys_path (a bit redundant but so it is compatible with plot data) # Read in notes for this experiment see if file exists in directory # Read in local xyz_picks file # This file must exist, otherwise we don't know where probe was # First see if the histology file exists before attempting to connect with FlatIron and # download # Haven't yet incorporated how to have region descriptions when not on Alyx # For now always have this as blank # Save the channel locations # Save the new alignment Create channel dictionary in form to write to json file :param brain_regions: information about location of electrode channels in brain atlas :type brain_regions: Bunch :return channel_dict: :type channel_dict: dictionary of dictionaries
2.512211
3
conpaas-director/cpsdirector/iaas/clouds/openstack.py
bopopescu/conpaas-2
5
6629845
import math, time from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.common.exceptions import BaseHTTPError from .base import Cloud class OpenStackCloud(Cloud): def __init__(self, cloud_name, iaas_config): Cloud.__init__(self, cloud_name) self.config = iaas_config self.cloud_name = cloud_name cloud_params = [ 'USER', 'PASSWORD', 'HOST', 'IMAGE_ID', 'SIZE_ID', 'KEY_NAME', 'SECURITY_GROUP_NAME', ] self._check_cloud_params(self.config, cloud_params) self.user = self.config.get(self.cloud_name, 'USER') self.passwd = self.config.get(self.cloud_name, 'PASSWORD') self.host = self.config.get(self.cloud_name, 'HOST') def get_by_id_or_name(self, label, id_or_name, rlist): by_id = filter(lambda x: x.id==id_or_name, rlist) if len(by_id) > 0: return by_id[0] by_name = filter(lambda x: x.name==id_or_name, rlist) if len(by_name) > 0: return by_name[0] raise ValueError('%s is not a valid value for %s' % (id_or_name, label)) def set_instance_attributes(self): self.img = self.get_by_id_or_name('IMAGE_ID', self.config.get(self.cloud_name, 'IMAGE_ID'), self.driver.list_images()) self.size = self.get_by_id_or_name('SIZE_ID', self.config.get(self.cloud_name, 'SIZE_ID'), self.driver.list_sizes()) self.network = None if self.config.has_option(self.cloud_name, 'NETWORK_ID'): self.network = self.get_by_id_or_name('NETWORK_ID', self.config.get(self.cloud_name, 'NETWORK_ID'), self.driver.ex_list_networks()) self.floating_pool = self.driver.ex_list_floating_ip_pools()[0] self.key_name = self.config.get(self.cloud_name, 'KEY_NAME') self.sg = self.config.get(self.cloud_name, 'SECURITY_GROUP_NAME') self.auto_assing_floating = True if self.config.has_option(self.cloud_name, 'AUTO_ASSIGN_FLOATING_IP'): self.auto_assing_floating = self.config.getboolean(self.cloud_name, 'AUTO_ASSIGN_FLOATING_IP') def get_cloud_type(self): return 'openstack' # connect to openstack cloud def _connect(self): # Driver = get_driver(Provider.EUCALYPTUS) Driver = 
get_driver(Provider.OPENSTACK) # self.driver = Driver(self.user, self.passwd, secure=False, # host=self.host, port=8773, path='/services/Cloud') self.driver = Driver(self.user, self.passwd, secure=False, ex_force_auth_url='http://%s:5000' % self.host, ex_force_auth_version='2.0_password', ex_tenant_name=self.user) self.set_instance_attributes() self.connected = True def config(self, config_params={}, context=None): if context is not None: self._context = context # def new_instances(self, count, name='conpaas', inst_type=None, volumes={}): # if self.connected is False: # self._connect() # flavor = self.size # if inst_type is not None: # flavor = self.get_by_id_or_name('SIZE_ID', inst_type, self.driver.list_sizes()) # kwargs = { # 'size': flavor, # 'image': self.img, # 'name': name, # 'ex_mincount': str(count), # 'ex_maxcount': str(count), # 'ex_securitygroup': self.sg, # 'ex_keyname': self.key_name, # 'ex_userdata': self.get_context() # } # if self.network: # kwargs['networks'] = [self.network] # lc_nodes = self.driver.create_node(**kwargs) # if not self.auto_assing_floating: # self.associate_floating_ips(lc_nodes) # nodes = self._create_service_nodes(lc_nodes) # if count > 1: # return nodes # return [ nodes ] def new_instances(self, nodes_info): if self.connected is False: self._connect() lc_nodes = [] for node_info in nodes_info: flavor = self.size if 'inst_type' in node_info and node_info['inst_type'] is not None: flavor = self.get_by_id_or_name('SIZE_ID', node_info['inst_type'], self.driver.list_sizes()) kwargs = { 'size': flavor, 'image': self.img, 'name': node_info['name'], 'ex_mincount': 1, 'ex_maxcount': 1, 'ex_securitygroup': self.sg, 'ex_keyname': self.key_name, 'ex_userdata': self.get_context() } if self.network: kwargs['networks'] = [self.network] lc_node = self.driver.create_node(**kwargs) node_info['id'] = lc_node.id if 'volumes' in node_info: for vol in node_info['volumes']: vol['vm_id'] = lc_node.id vol['vol_name'] = vol['vol_name'] % vol lc_volume = 
self.create_volume(vol['vol_size'], vol['vol_name'], vol['vm_id']) vol['vol_id'] = lc_volume.id class volume: id = vol['vol_id'] class node: id = vol['vm_id'] self.attach_volume(node, volume, vol['dev_name']) lc_nodes += [lc_node] if not self.auto_assing_floating: self.associate_floating_ips(lc_nodes) nodes = self._create_service_nodes(lc_nodes, nodes_info) return nodes def associate_floating_ips(self, instances): if type(instances) is not list: instances = [instances] nr_attempts = 3 * len(instances) for instance in instances: while nr_attempts > 0: try: self.driver.ex_attach_floating_ip_to_node(instance, self.get_floating_ip().ip_address) break except BaseHTTPError: self.logger.debug('Attaching IP failed probly because the VM was not on a network yet, let\'s wait a bit and retry') time.sleep(1) nr_attempts -= 1 if nr_attempts == 0: raise Exception('Error assigning floating IPs') def get_floating_ip(self): free_fips = filter(lambda x: x.node_id==None, self.driver.ex_list_floating_ips()) return free_fips[0] if len(free_fips) > 0 else self.floating_pool.create_floating_ip() def create_volume(self, size, name, vm_id=None): # OpenStack expects volume size in GiB. size /= 1024.0 size = int(math.ceil(size)) return self.driver.create_volume(size, name) def attach_volume(self, node, volume, device): device = '/dev/%s' % device trials = 10 while trials > 0: trials -= 1 try: attach_res = self.driver.attach_volume(node, volume, device) break except Exception, err: # FIXME: be more specific self.logger.debug('Attaching volume failed. Error: %s' % err) attach_res = False time.sleep(5) self.logger.debug('Attaching volume succeeded. 
Result: %s' % attach_res) return attach_res def detach_volume(self, volume): volume = filter(lambda x: x.id==volume.id, self.driver.list_volumes())[0] return self.driver.detach_volume(volume) def list_instance_volumes(self, instance): return filter(lambda x: False if len(x.extra['attachments'])==0 else x.extra['attachments'][0]['serverId']==instance.id, self.driver.list_volumes())
import math, time from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.common.exceptions import BaseHTTPError from .base import Cloud class OpenStackCloud(Cloud): def __init__(self, cloud_name, iaas_config): Cloud.__init__(self, cloud_name) self.config = iaas_config self.cloud_name = cloud_name cloud_params = [ 'USER', 'PASSWORD', 'HOST', 'IMAGE_ID', 'SIZE_ID', 'KEY_NAME', 'SECURITY_GROUP_NAME', ] self._check_cloud_params(self.config, cloud_params) self.user = self.config.get(self.cloud_name, 'USER') self.passwd = self.config.get(self.cloud_name, 'PASSWORD') self.host = self.config.get(self.cloud_name, 'HOST') def get_by_id_or_name(self, label, id_or_name, rlist): by_id = filter(lambda x: x.id==id_or_name, rlist) if len(by_id) > 0: return by_id[0] by_name = filter(lambda x: x.name==id_or_name, rlist) if len(by_name) > 0: return by_name[0] raise ValueError('%s is not a valid value for %s' % (id_or_name, label)) def set_instance_attributes(self): self.img = self.get_by_id_or_name('IMAGE_ID', self.config.get(self.cloud_name, 'IMAGE_ID'), self.driver.list_images()) self.size = self.get_by_id_or_name('SIZE_ID', self.config.get(self.cloud_name, 'SIZE_ID'), self.driver.list_sizes()) self.network = None if self.config.has_option(self.cloud_name, 'NETWORK_ID'): self.network = self.get_by_id_or_name('NETWORK_ID', self.config.get(self.cloud_name, 'NETWORK_ID'), self.driver.ex_list_networks()) self.floating_pool = self.driver.ex_list_floating_ip_pools()[0] self.key_name = self.config.get(self.cloud_name, 'KEY_NAME') self.sg = self.config.get(self.cloud_name, 'SECURITY_GROUP_NAME') self.auto_assing_floating = True if self.config.has_option(self.cloud_name, 'AUTO_ASSIGN_FLOATING_IP'): self.auto_assing_floating = self.config.getboolean(self.cloud_name, 'AUTO_ASSIGN_FLOATING_IP') def get_cloud_type(self): return 'openstack' # connect to openstack cloud def _connect(self): # Driver = get_driver(Provider.EUCALYPTUS) Driver = 
get_driver(Provider.OPENSTACK) # self.driver = Driver(self.user, self.passwd, secure=False, # host=self.host, port=8773, path='/services/Cloud') self.driver = Driver(self.user, self.passwd, secure=False, ex_force_auth_url='http://%s:5000' % self.host, ex_force_auth_version='2.0_password', ex_tenant_name=self.user) self.set_instance_attributes() self.connected = True def config(self, config_params={}, context=None): if context is not None: self._context = context # def new_instances(self, count, name='conpaas', inst_type=None, volumes={}): # if self.connected is False: # self._connect() # flavor = self.size # if inst_type is not None: # flavor = self.get_by_id_or_name('SIZE_ID', inst_type, self.driver.list_sizes()) # kwargs = { # 'size': flavor, # 'image': self.img, # 'name': name, # 'ex_mincount': str(count), # 'ex_maxcount': str(count), # 'ex_securitygroup': self.sg, # 'ex_keyname': self.key_name, # 'ex_userdata': self.get_context() # } # if self.network: # kwargs['networks'] = [self.network] # lc_nodes = self.driver.create_node(**kwargs) # if not self.auto_assing_floating: # self.associate_floating_ips(lc_nodes) # nodes = self._create_service_nodes(lc_nodes) # if count > 1: # return nodes # return [ nodes ] def new_instances(self, nodes_info): if self.connected is False: self._connect() lc_nodes = [] for node_info in nodes_info: flavor = self.size if 'inst_type' in node_info and node_info['inst_type'] is not None: flavor = self.get_by_id_or_name('SIZE_ID', node_info['inst_type'], self.driver.list_sizes()) kwargs = { 'size': flavor, 'image': self.img, 'name': node_info['name'], 'ex_mincount': 1, 'ex_maxcount': 1, 'ex_securitygroup': self.sg, 'ex_keyname': self.key_name, 'ex_userdata': self.get_context() } if self.network: kwargs['networks'] = [self.network] lc_node = self.driver.create_node(**kwargs) node_info['id'] = lc_node.id if 'volumes' in node_info: for vol in node_info['volumes']: vol['vm_id'] = lc_node.id vol['vol_name'] = vol['vol_name'] % vol lc_volume = 
self.create_volume(vol['vol_size'], vol['vol_name'], vol['vm_id']) vol['vol_id'] = lc_volume.id class volume: id = vol['vol_id'] class node: id = vol['vm_id'] self.attach_volume(node, volume, vol['dev_name']) lc_nodes += [lc_node] if not self.auto_assing_floating: self.associate_floating_ips(lc_nodes) nodes = self._create_service_nodes(lc_nodes, nodes_info) return nodes def associate_floating_ips(self, instances): if type(instances) is not list: instances = [instances] nr_attempts = 3 * len(instances) for instance in instances: while nr_attempts > 0: try: self.driver.ex_attach_floating_ip_to_node(instance, self.get_floating_ip().ip_address) break except BaseHTTPError: self.logger.debug('Attaching IP failed probly because the VM was not on a network yet, let\'s wait a bit and retry') time.sleep(1) nr_attempts -= 1 if nr_attempts == 0: raise Exception('Error assigning floating IPs') def get_floating_ip(self): free_fips = filter(lambda x: x.node_id==None, self.driver.ex_list_floating_ips()) return free_fips[0] if len(free_fips) > 0 else self.floating_pool.create_floating_ip() def create_volume(self, size, name, vm_id=None): # OpenStack expects volume size in GiB. size /= 1024.0 size = int(math.ceil(size)) return self.driver.create_volume(size, name) def attach_volume(self, node, volume, device): device = '/dev/%s' % device trials = 10 while trials > 0: trials -= 1 try: attach_res = self.driver.attach_volume(node, volume, device) break except Exception, err: # FIXME: be more specific self.logger.debug('Attaching volume failed. Error: %s' % err) attach_res = False time.sleep(5) self.logger.debug('Attaching volume succeeded. 
Result: %s' % attach_res) return attach_res def detach_volume(self, volume): volume = filter(lambda x: x.id==volume.id, self.driver.list_volumes())[0] return self.driver.detach_volume(volume) def list_instance_volumes(self, instance): return filter(lambda x: False if len(x.extra['attachments'])==0 else x.extra['attachments'][0]['serverId']==instance.id, self.driver.list_volumes())
en
0.297258
# connect to openstack cloud # Driver = get_driver(Provider.EUCALYPTUS) # self.driver = Driver(self.user, self.passwd, secure=False, # host=self.host, port=8773, path='/services/Cloud') # def new_instances(self, count, name='conpaas', inst_type=None, volumes={}): # if self.connected is False: # self._connect() # flavor = self.size # if inst_type is not None: # flavor = self.get_by_id_or_name('SIZE_ID', inst_type, self.driver.list_sizes()) # kwargs = { # 'size': flavor, # 'image': self.img, # 'name': name, # 'ex_mincount': str(count), # 'ex_maxcount': str(count), # 'ex_securitygroup': self.sg, # 'ex_keyname': self.key_name, # 'ex_userdata': self.get_context() # } # if self.network: # kwargs['networks'] = [self.network] # lc_nodes = self.driver.create_node(**kwargs) # if not self.auto_assing_floating: # self.associate_floating_ips(lc_nodes) # nodes = self._create_service_nodes(lc_nodes) # if count > 1: # return nodes # return [ nodes ] # OpenStack expects volume size in GiB. # FIXME: be more specific
2.295928
2
conf/mpiscanner.py
maxhgerlach/mpi4py
533
6629846
# Very, very naive RE-based way for collecting declarations inside # 'cdef extern from *' Cython blocks in in source files, and next # generate compatibility headers for MPI-2 partially implemented or # built, or MPI-1 implementations, perhaps providing a subset of MPI-2 from textwrap import dedent from warnings import warn import mpiregexes as Re class Node(object): REGEX = None def match(self, line): m = self.REGEX.search(line) if m: return m.groups() match = classmethod(match) HEADER = None CONFIG = None MISSING = None MISSING_HEAD = """\ #ifndef PyMPI_HAVE_%(name)s #undef %(cname)s """ MISSING_TAIL = """ #endif """ def init(self, name, **kargs): assert name is not None self.name = name self.__dict__.update(kargs) def header(self): line = dedent(self.HEADER) % vars(self) line = line.replace('\n', '') line = line.replace(' ', ' ') return line + '\n' def config(self): return dedent(self.CONFIG) % vars(self) def missing(self, guard=True): if guard: head = dedent(self.MISSING_HEAD) tail = dedent(self.MISSING_TAIL) else: head = '#undef %(cname)s\n' tail = '\n\n' body = dedent(self.MISSING) return (head+body+tail) % vars(self) class NodeType(Node): CONFIG = """\ %(ctype)s v; %(ctype)s* p; (void)v; (void)p;""" def __init__(self, ctype): self.init(name=ctype, cname=ctype, ctype=ctype,) class NodeStructType(NodeType): HEADER = """\ typedef struct {%(cfields)s ...; } %(ctype)s;""" MISSING = """\ typedef struct PyMPI_%(ctype)s { %(cfields)s } PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" def __init__(self, ctype, cfields): super(NodeStructType, self).__init__(ctype) self.cfields = '\n'.join([' %s %s;' % field for field in cfields]) class NodeFuncType(NodeType): HEADER = """\ typedef %(crett)s (%(cname)s)(%(cargs)s);""" MISSING = """\ typedef %(crett)s (MPIAPI PyMPI_%(cname)s)(%(cargs)s); #define %(cname)s PyMPI_%(cname)s""" def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, cname=cname, ctype=cname+'*',) self.crett = crett self.cargs = 
cargs or 'void' if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class NodeValue(Node): HEADER = """\ const %(ctype)s %(cname)s;""" CONFIG = """\ %(ctype)s v; v = %(cname)s; (void)v;""" MISSING = '#define %(cname)s (%(calias)s)' def __init__(self, ctype, cname, calias): self.init(name=cname, cname=cname, ctype=ctype, calias=calias) if ctype.endswith('*'): ctype = ctype + ' const' self.HEADER = ctype + ' %(cname)s;' class NodePtrVal(NodeValue): MISSING = '#define %(cname)s ((%(ctype)s)%(calias)s)' def ctypefix(ct): ct = ct.strip() ct = ct.replace('[][3]',' (*)[3]') ct = ct.replace('[]','*') return ct class NodeFuncProto(Node): HEADER = """\ %(crett)s %(cname)s(%(cargs)s);""" CONFIG = """\ %(crett)s v; v = %(cname)s(%(cargscall)s); (void)v;""" MISSING = ' '. join(['#define %(cname)s(%(cargsnamed)s)', 'PyMPI_UNAVAILABLE("%(name)s"%(comma)s%(cargsnamed)s)']) def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, cname=cname) self.crett = crett self.cargs = cargs or 'void' if cargs == 'void': cargs = '' if cargs: cargs = cargs.split(',') if cargs[-1].strip() == '...': del cargs[-1] else: cargs = [] self.cargstype = cargs nargs = len(cargs) if nargs: self.comma = ',' else: self.comma = '' cargscall = ['(%s)0' % ctypefix(a) for a in cargs] self.cargscall = ','.join(cargscall) cargsnamed = ['a%d' % (a+1) for a in range(nargs)] self.cargsnamed = ','.join(cargsnamed) if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class IntegralType(NodeType): REGEX = Re.INTEGRAL_TYPE HEADER = """\ typedef %(cbase)s... 
%(ctype)s;""" MISSING = """\ typedef %(ctdef)s PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" def __init__(self, cbase, ctype, calias=None): super(IntegralType, self).__init__(ctype) self.cbase = cbase if calias is not None: self.ctdef = calias else: self.ctdef = cbase class StructType(NodeStructType): REGEX = Re.STRUCT_TYPE def __init__(self, ctype, calias=None): cfields = [] if ctype == 'MPI_Status': cnames = ['MPI_SOURCE', 'MPI_TAG', 'MPI_ERROR'] cfields = list(zip(['int']*3, cnames)) super(StructType, self).__init__(ctype, cfields) if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class OpaqueType(NodeType): REGEX = Re.OPAQUE_TYPE HEADER = """\ typedef struct{...;} %(ctype)s;""" MISSING = """\ typedef void *PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" class FunctionType(NodeFuncType): REGEX = Re.FUNCTION_TYPE class EnumValue(NodeValue): REGEX = Re.ENUM_VALUE def __init__(self, cname, calias): self.init(name=cname, cname=cname, ctype='int', calias=calias) class HandleValue(NodeValue): REGEX = Re.HANDLE_VALUE MISSING = '#define %(cname)s ((%(ctype)s)%(calias)s)' class BasicPtrVal(NodePtrVal): REGEX = Re.BASIC_PTRVAL class IntegralPtrVal(NodePtrVal): REGEX = Re.INTEGRAL_PTRVAL class StructPtrVal(NodePtrVal): REGEX = Re.STRUCT_PTRVAL class FunctionPtrVal(NodePtrVal): REGEX = Re.FUNCT_PTRVAL class FunctionProto(NodeFuncProto): REGEX = Re.FUNCTION_PROTO class FunctionC2F(NodeFuncProto): REGEX = Re.FUNCTION_C2F MISSING = ' '.join(['#define %(cname)s(%(cargsnamed)s)', '((%(crett)s)0)']) class FunctionF2C(NodeFuncProto): REGEX = Re.FUNCTION_F2C MISSING = ' '.join(['#define %(cname)s(%(cargsnamed)s)', '%(cretv)s']) def __init__(self, *a, **k): NodeFuncProto.__init__(self, *a, **k) self.cretv = self.crett.upper() + '_NULL' class Scanner(object): NODE_TYPES = [ IntegralType, StructType, OpaqueType, HandleValue, EnumValue, BasicPtrVal, IntegralPtrVal, StructPtrVal, FunctionType, FunctionPtrVal, FunctionProto, 
FunctionC2F, FunctionF2C, ] def __init__(self): self.nodes = [] self.nodemap = {} def parse_file(self, filename): with open(filename) as f: self.parse_lines(f) def parse_lines(self, lines): for line in lines: self.parse_line(line) def parse_line(self, line): if Re.IGNORE.match(line): return nodemap = self.nodemap nodelist = self.nodes for nodetype in self.NODE_TYPES: args = nodetype.match(line) if args: node = nodetype(*args) assert node.name not in nodemap, node.name nodemap[node.name] = len(nodelist) nodelist.append(node) break if not args: warn('unmatched line:\n%s' % line) def __iter__(self): return iter(self.nodes) def __getitem__(self, name): return self.nodes[self.nodemap[name]] def dump_header_h(self, fileobj): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_header_h(f) return for node in self: fileobj.write(node.header()) CONFIG_HEAD = """\ #ifndef PyMPI_CONFIG_H #define PyMPI_CONFIG_H """ CONFIG_MACRO = 'PyMPI_HAVE_%s' CONFIG_TAIL = """\ #endif /* !PyMPI_CONFIG_H */ """ def dump_config_h(self, fileobj, suite): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_config_h(f, suite) return head = dedent(self.CONFIG_HEAD) macro = dedent(self.CONFIG_MACRO) tail = dedent(self.CONFIG_TAIL) fileobj.write(head) if suite is None: for node in self: line = '#undef %s\n' % ((macro % node.name)) fileobj.write(line) else: for name, result in suite: assert name in self.nodemap if result: line = '#define %s 1\n' % ((macro % name)) else: line = '#undef %s\n' % ((macro % name)) fileobj.write(line) fileobj.write(tail) MISSING_HEAD = """\ #ifndef PyMPI_MISSING_H #define PyMPI_MISSING_H #ifndef PyMPI_UNUSED # if defined(__GNUC__) # if !defined(__cplusplus) || (__GNUC__>3||(__GNUC__==3&&__GNUC_MINOR__>=4)) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif # elif defined(__INTEL_COMPILER) || defined(__ICC) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif #endif 
#define PyMPI_ERR_UNAVAILABLE (-1431655766) /*0xaaaaaaaa*/ static PyMPI_UNUSED int PyMPI_UNAVAILABLE(const char *name,...) { (void)name; return PyMPI_ERR_UNAVAILABLE; } """ MISSING_TAIL = """\ #endif /* !PyMPI_MISSING_H */ """ def dump_missing_h(self, fileobj, suite): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_missing_h(f, suite) return head = dedent(self.MISSING_HEAD) tail = dedent(self.MISSING_TAIL) # fileobj.write(head) if suite is None: for node in self: fileobj.write(node.missing()) else: for name, result in suite: node = self[name] if not result: fileobj.write(node.missing()) fileobj.write(tail) # ----------------------------------------- if __name__ == '__main__': import sys, os sources = [os.path.join('src', 'mpi4py', 'libmpi.pxd')] log = lambda msg: sys.stderr.write(msg + '\n') scanner = Scanner() for filename in sources: log('parsing file %s' % filename) scanner.parse_file(filename) log('processed %d definitions' % len(scanner.nodes)) config_h = os.path.join('src', 'lib-mpi', 'config', 'config.h') log('writing file %s' % config_h) scanner.dump_config_h(config_h, None) missing_h = os.path.join('src', 'lib-mpi', 'missing.h') log('writing file %s' % missing_h) scanner.dump_missing_h(missing_h, None) #libmpi_h = os.path.join('.', 'libmpi.h') #log('writing file %s' % libmpi_h) #scanner.dump_header_h(libmpi_h) # -----------------------------------------
# Very, very naive RE-based way for collecting declarations inside # 'cdef extern from *' Cython blocks in in source files, and next # generate compatibility headers for MPI-2 partially implemented or # built, or MPI-1 implementations, perhaps providing a subset of MPI-2 from textwrap import dedent from warnings import warn import mpiregexes as Re class Node(object): REGEX = None def match(self, line): m = self.REGEX.search(line) if m: return m.groups() match = classmethod(match) HEADER = None CONFIG = None MISSING = None MISSING_HEAD = """\ #ifndef PyMPI_HAVE_%(name)s #undef %(cname)s """ MISSING_TAIL = """ #endif """ def init(self, name, **kargs): assert name is not None self.name = name self.__dict__.update(kargs) def header(self): line = dedent(self.HEADER) % vars(self) line = line.replace('\n', '') line = line.replace(' ', ' ') return line + '\n' def config(self): return dedent(self.CONFIG) % vars(self) def missing(self, guard=True): if guard: head = dedent(self.MISSING_HEAD) tail = dedent(self.MISSING_TAIL) else: head = '#undef %(cname)s\n' tail = '\n\n' body = dedent(self.MISSING) return (head+body+tail) % vars(self) class NodeType(Node): CONFIG = """\ %(ctype)s v; %(ctype)s* p; (void)v; (void)p;""" def __init__(self, ctype): self.init(name=ctype, cname=ctype, ctype=ctype,) class NodeStructType(NodeType): HEADER = """\ typedef struct {%(cfields)s ...; } %(ctype)s;""" MISSING = """\ typedef struct PyMPI_%(ctype)s { %(cfields)s } PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" def __init__(self, ctype, cfields): super(NodeStructType, self).__init__(ctype) self.cfields = '\n'.join([' %s %s;' % field for field in cfields]) class NodeFuncType(NodeType): HEADER = """\ typedef %(crett)s (%(cname)s)(%(cargs)s);""" MISSING = """\ typedef %(crett)s (MPIAPI PyMPI_%(cname)s)(%(cargs)s); #define %(cname)s PyMPI_%(cname)s""" def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, cname=cname, ctype=cname+'*',) self.crett = crett self.cargs = 
cargs or 'void' if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class NodeValue(Node): HEADER = """\ const %(ctype)s %(cname)s;""" CONFIG = """\ %(ctype)s v; v = %(cname)s; (void)v;""" MISSING = '#define %(cname)s (%(calias)s)' def __init__(self, ctype, cname, calias): self.init(name=cname, cname=cname, ctype=ctype, calias=calias) if ctype.endswith('*'): ctype = ctype + ' const' self.HEADER = ctype + ' %(cname)s;' class NodePtrVal(NodeValue): MISSING = '#define %(cname)s ((%(ctype)s)%(calias)s)' def ctypefix(ct): ct = ct.strip() ct = ct.replace('[][3]',' (*)[3]') ct = ct.replace('[]','*') return ct class NodeFuncProto(Node): HEADER = """\ %(crett)s %(cname)s(%(cargs)s);""" CONFIG = """\ %(crett)s v; v = %(cname)s(%(cargscall)s); (void)v;""" MISSING = ' '. join(['#define %(cname)s(%(cargsnamed)s)', 'PyMPI_UNAVAILABLE("%(name)s"%(comma)s%(cargsnamed)s)']) def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, cname=cname) self.crett = crett self.cargs = cargs or 'void' if cargs == 'void': cargs = '' if cargs: cargs = cargs.split(',') if cargs[-1].strip() == '...': del cargs[-1] else: cargs = [] self.cargstype = cargs nargs = len(cargs) if nargs: self.comma = ',' else: self.comma = '' cargscall = ['(%s)0' % ctypefix(a) for a in cargs] self.cargscall = ','.join(cargscall) cargsnamed = ['a%d' % (a+1) for a in range(nargs)] self.cargsnamed = ','.join(cargsnamed) if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class IntegralType(NodeType): REGEX = Re.INTEGRAL_TYPE HEADER = """\ typedef %(cbase)s... 
%(ctype)s;""" MISSING = """\ typedef %(ctdef)s PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" def __init__(self, cbase, ctype, calias=None): super(IntegralType, self).__init__(ctype) self.cbase = cbase if calias is not None: self.ctdef = calias else: self.ctdef = cbase class StructType(NodeStructType): REGEX = Re.STRUCT_TYPE def __init__(self, ctype, calias=None): cfields = [] if ctype == 'MPI_Status': cnames = ['MPI_SOURCE', 'MPI_TAG', 'MPI_ERROR'] cfields = list(zip(['int']*3, cnames)) super(StructType, self).__init__(ctype, cfields) if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class OpaqueType(NodeType): REGEX = Re.OPAQUE_TYPE HEADER = """\ typedef struct{...;} %(ctype)s;""" MISSING = """\ typedef void *PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" class FunctionType(NodeFuncType): REGEX = Re.FUNCTION_TYPE class EnumValue(NodeValue): REGEX = Re.ENUM_VALUE def __init__(self, cname, calias): self.init(name=cname, cname=cname, ctype='int', calias=calias) class HandleValue(NodeValue): REGEX = Re.HANDLE_VALUE MISSING = '#define %(cname)s ((%(ctype)s)%(calias)s)' class BasicPtrVal(NodePtrVal): REGEX = Re.BASIC_PTRVAL class IntegralPtrVal(NodePtrVal): REGEX = Re.INTEGRAL_PTRVAL class StructPtrVal(NodePtrVal): REGEX = Re.STRUCT_PTRVAL class FunctionPtrVal(NodePtrVal): REGEX = Re.FUNCT_PTRVAL class FunctionProto(NodeFuncProto): REGEX = Re.FUNCTION_PROTO class FunctionC2F(NodeFuncProto): REGEX = Re.FUNCTION_C2F MISSING = ' '.join(['#define %(cname)s(%(cargsnamed)s)', '((%(crett)s)0)']) class FunctionF2C(NodeFuncProto): REGEX = Re.FUNCTION_F2C MISSING = ' '.join(['#define %(cname)s(%(cargsnamed)s)', '%(cretv)s']) def __init__(self, *a, **k): NodeFuncProto.__init__(self, *a, **k) self.cretv = self.crett.upper() + '_NULL' class Scanner(object): NODE_TYPES = [ IntegralType, StructType, OpaqueType, HandleValue, EnumValue, BasicPtrVal, IntegralPtrVal, StructPtrVal, FunctionType, FunctionPtrVal, FunctionProto, 
FunctionC2F, FunctionF2C, ] def __init__(self): self.nodes = [] self.nodemap = {} def parse_file(self, filename): with open(filename) as f: self.parse_lines(f) def parse_lines(self, lines): for line in lines: self.parse_line(line) def parse_line(self, line): if Re.IGNORE.match(line): return nodemap = self.nodemap nodelist = self.nodes for nodetype in self.NODE_TYPES: args = nodetype.match(line) if args: node = nodetype(*args) assert node.name not in nodemap, node.name nodemap[node.name] = len(nodelist) nodelist.append(node) break if not args: warn('unmatched line:\n%s' % line) def __iter__(self): return iter(self.nodes) def __getitem__(self, name): return self.nodes[self.nodemap[name]] def dump_header_h(self, fileobj): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_header_h(f) return for node in self: fileobj.write(node.header()) CONFIG_HEAD = """\ #ifndef PyMPI_CONFIG_H #define PyMPI_CONFIG_H """ CONFIG_MACRO = 'PyMPI_HAVE_%s' CONFIG_TAIL = """\ #endif /* !PyMPI_CONFIG_H */ """ def dump_config_h(self, fileobj, suite): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_config_h(f, suite) return head = dedent(self.CONFIG_HEAD) macro = dedent(self.CONFIG_MACRO) tail = dedent(self.CONFIG_TAIL) fileobj.write(head) if suite is None: for node in self: line = '#undef %s\n' % ((macro % node.name)) fileobj.write(line) else: for name, result in suite: assert name in self.nodemap if result: line = '#define %s 1\n' % ((macro % name)) else: line = '#undef %s\n' % ((macro % name)) fileobj.write(line) fileobj.write(tail) MISSING_HEAD = """\ #ifndef PyMPI_MISSING_H #define PyMPI_MISSING_H #ifndef PyMPI_UNUSED # if defined(__GNUC__) # if !defined(__cplusplus) || (__GNUC__>3||(__GNUC__==3&&__GNUC_MINOR__>=4)) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif # elif defined(__INTEL_COMPILER) || defined(__ICC) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif #endif 
#define PyMPI_ERR_UNAVAILABLE (-1431655766) /*0xaaaaaaaa*/ static PyMPI_UNUSED int PyMPI_UNAVAILABLE(const char *name,...) { (void)name; return PyMPI_ERR_UNAVAILABLE; } """ MISSING_TAIL = """\ #endif /* !PyMPI_MISSING_H */ """ def dump_missing_h(self, fileobj, suite): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_missing_h(f, suite) return head = dedent(self.MISSING_HEAD) tail = dedent(self.MISSING_TAIL) # fileobj.write(head) if suite is None: for node in self: fileobj.write(node.missing()) else: for name, result in suite: node = self[name] if not result: fileobj.write(node.missing()) fileobj.write(tail) # ----------------------------------------- if __name__ == '__main__': import sys, os sources = [os.path.join('src', 'mpi4py', 'libmpi.pxd')] log = lambda msg: sys.stderr.write(msg + '\n') scanner = Scanner() for filename in sources: log('parsing file %s' % filename) scanner.parse_file(filename) log('processed %d definitions' % len(scanner.nodes)) config_h = os.path.join('src', 'lib-mpi', 'config', 'config.h') log('writing file %s' % config_h) scanner.dump_config_h(config_h, None) missing_h = os.path.join('src', 'lib-mpi', 'missing.h') log('writing file %s' % missing_h) scanner.dump_missing_h(missing_h, None) #libmpi_h = os.path.join('.', 'libmpi.h') #log('writing file %s' % libmpi_h) #scanner.dump_header_h(libmpi_h) # -----------------------------------------
en
0.309751
# Very, very naive RE-based way for collecting declarations inside # 'cdef extern from *' Cython blocks in in source files, and next # generate compatibility headers for MPI-2 partially implemented or # built, or MPI-1 implementations, perhaps providing a subset of MPI-2 \ #ifndef PyMPI_HAVE_%(name)s #undef %(cname)s #endif \ %(ctype)s v; %(ctype)s* p; (void)v; (void)p; \ typedef struct {%(cfields)s ...; } %(ctype)s; \ typedef struct PyMPI_%(ctype)s { %(cfields)s } PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s \ typedef %(crett)s (%(cname)s)(%(cargs)s); \ typedef %(crett)s (MPIAPI PyMPI_%(cname)s)(%(cargs)s); #define %(cname)s PyMPI_%(cname)s \ const %(ctype)s %(cname)s; \ %(ctype)s v; v = %(cname)s; (void)v; \ %(crett)s %(cname)s(%(cargs)s); \ %(crett)s v; v = %(cname)s(%(cargscall)s); (void)v; \ typedef %(cbase)s... %(ctype)s; \ typedef %(ctdef)s PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s \ typedef struct{...;} %(ctype)s; \ typedef void *PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s \ #ifndef PyMPI_CONFIG_H #define PyMPI_CONFIG_H \ #endif /* !PyMPI_CONFIG_H */ \ #ifndef PyMPI_MISSING_H #define PyMPI_MISSING_H #ifndef PyMPI_UNUSED # if defined(__GNUC__) # if !defined(__cplusplus) || (__GNUC__>3||(__GNUC__==3&&__GNUC_MINOR__>=4)) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif # elif defined(__INTEL_COMPILER) || defined(__ICC) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif #endif #define PyMPI_ERR_UNAVAILABLE (-1431655766) /*0xaaaaaaaa*/ static PyMPI_UNUSED int PyMPI_UNAVAILABLE(const char *name,...) { (void)name; return PyMPI_ERR_UNAVAILABLE; } \ #endif /* !PyMPI_MISSING_H */ # # ----------------------------------------- #libmpi_h = os.path.join('.', 'libmpi.h') #log('writing file %s' % libmpi_h) #scanner.dump_header_h(libmpi_h) # -----------------------------------------
2.136461
2
ooobuild/lo/task/x_master_password_handling2.py
Amourspirit/ooo_uno_tmpl
0
6629847
# coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Interface Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.task import typing from abc import abstractmethod from .x_master_password_handling import XMasterPasswordHandling as XMasterPasswordHandling_49900ff7 if typing.TYPE_CHECKING: from .x_interaction_handler import XInteractionHandler as XInteractionHandler_bf80e51 class XMasterPasswordHandling2(XMasterPasswordHandling_49900ff7): """ allows to change the master password, or let it be requested and checked. See Also: `API XMasterPasswordHandling2 <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1task_1_1XMasterPasswordHandling2.html>`_ """ __ooo_ns__: str = 'com.sun.star.task' __ooo_full_ns__: str = 'com.sun.star.task.XMasterPasswordHandling2' __ooo_type_name__: str = 'interface' __pyunointerface__: str = 'com.sun.star.task.XMasterPasswordHandling2' @abstractmethod def isDefaultMasterPasswordUsed(self) -> bool: """ allows to detect whether the default master password is used """ @abstractmethod def useDefaultMasterPassword(self, xHandler: 'XInteractionHandler_bf80e51') -> bool: """ allows to let the default password be used Please use this method with care. Using of default master password let the passwords be stored non-encrypted. 
If a master password is predefined in the algorithm it is no more an encryption, it is just an encoding. """ __all__ = ['XMasterPasswordHandling2']
# coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Interface Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.task import typing from abc import abstractmethod from .x_master_password_handling import XMasterPasswordHandling as XMasterPasswordHandling_49900ff7 if typing.TYPE_CHECKING: from .x_interaction_handler import XInteractionHandler as XInteractionHandler_bf80e51 class XMasterPasswordHandling2(XMasterPasswordHandling_49900ff7): """ allows to change the master password, or let it be requested and checked. See Also: `API XMasterPasswordHandling2 <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1task_1_1XMasterPasswordHandling2.html>`_ """ __ooo_ns__: str = 'com.sun.star.task' __ooo_full_ns__: str = 'com.sun.star.task.XMasterPasswordHandling2' __ooo_type_name__: str = 'interface' __pyunointerface__: str = 'com.sun.star.task.XMasterPasswordHandling2' @abstractmethod def isDefaultMasterPasswordUsed(self) -> bool: """ allows to detect whether the default master password is used """ @abstractmethod def useDefaultMasterPassword(self, xHandler: 'XInteractionHandler_bf80e51') -> bool: """ allows to let the default password be used Please use this method with care. Using of default master password let the passwords be stored non-encrypted. 
If a master password is predefined in the algorithm it is no more an encryption, it is just an encoding. """ __all__ = ['XMasterPasswordHandling2']
en
0.777954
# coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Interface Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.task allows to change the master password, or let it be requested and checked. See Also: `API XMasterPasswordHandling2 <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1task_1_1XMasterPasswordHandling2.html>`_ allows to detect whether the default master password is used allows to let the default password be used Please use this method with care. Using of default master password let the passwords be stored non-encrypted. If a master password is predefined in the algorithm it is no more an encryption, it is just an encoding.
1.666897
2
scripts/data/get_info.py
morrislab/plos-medicine-joint-patterns
0
6629848
<reponame>morrislab/plos-medicine-joint-patterns<filename>scripts/data/get_info.py """ Extracts information about the data. """ import click import pandas as pd import yaml import tqdm from logging import * @click.command() @click.option( '--input', required=True, multiple=True, help='read input data from Excel files INPUT') @click.option( '--output', required=True, help='output column information to YAML file OUTPUT') def main(input, output): basicConfig( level=INFO, handlers=[ StreamHandler(), FileHandler( '{}.log'.format(output), mode='w') ]) info('Reading input data') data = { x: sorted(pd.read_excel(x).columns.tolist()) for x in tqdm.tqdm(input) } info('Writing output') with open(output, 'w') as handle: yaml.dump(data, handle, default_flow_style=False) if __name__ == '__main__': main()
""" Extracts information about the data. """ import click import pandas as pd import yaml import tqdm from logging import * @click.command() @click.option( '--input', required=True, multiple=True, help='read input data from Excel files INPUT') @click.option( '--output', required=True, help='output column information to YAML file OUTPUT') def main(input, output): basicConfig( level=INFO, handlers=[ StreamHandler(), FileHandler( '{}.log'.format(output), mode='w') ]) info('Reading input data') data = { x: sorted(pd.read_excel(x).columns.tolist()) for x in tqdm.tqdm(input) } info('Writing output') with open(output, 'w') as handle: yaml.dump(data, handle, default_flow_style=False) if __name__ == '__main__': main()
en
0.548619
Extracts information about the data.
3.008339
3
tests/admin/clush-tests/NodeSetGroupTest.py
utdsimmons/ohpc
692
6629849
<filename>tests/admin/clush-tests/NodeSetGroupTest.py #!/usr/bin/env python # ClusterShell.Node* test suite """Unit test for NodeSet with Group support""" import copy import shutil import sys import unittest sys.path.insert(0, '../lib') from TLib import * # Wildcard import for testing purpose from ClusterShell.NodeSet import * from ClusterShell.NodeUtils import * def makeTestG1(): """Create a temporary group file 1""" f1 = make_temp_file(""" # oss: montana5,montana4 mds: montana6 io: montana[4-6] #42: montana3 compute: montana[32-163] chassis1: montana[32-33] chassis2: montana[34-35] chassis3: montana[36-37] chassis4: montana[38-39] chassis5: montana[40-41] chassis6: montana[42-43] chassis7: montana[44-45] chassis8: montana[46-47] chassis9: montana[48-49] chassis10: montana[50-51] chassis11: montana[52-53] chassis12: montana[54-55] Uppercase: montana[1-2] gpuchassis: @chassis[4-5] gpu: montana[38-41] all: montana[1-6,32-163] """) # /!\ Need to return file object and not f1.name, otherwise the temporary # file might be immediately unlinked. 
return f1 def makeTestG2(): """Create a temporary group file 2""" f2 = make_temp_file(""" # # para: montana[32-37,42-55] gpu: montana[38-41] escape%test: montana[87-90] esc%test2: @escape%test """) return f2 def makeTestG3(): """Create a temporary group file 3""" f3 = make_temp_file(""" # # all: montana[32-55] para: montana[32-37,42-55] gpu: montana[38-41] login: montana[32-33] overclock: montana[41-42] chassis1: montana[32-33] chassis2: montana[34-35] chassis3: montana[36-37] single: idaho """) return f3 def makeTestR3(): """Create a temporary reverse group file 3""" r3 = make_temp_file(""" # # montana32: all,para,login,chassis1 montana33: all,para,login,chassis1 montana34: all,para,chassis2 montana35: all,para,chassis2 montana36: all,para,chassis3 montana37: all,para,chassis3 montana38: all,gpu montana39: all,gpu montana40: all,gpu montana41: all,gpu,overclock montana42: all,para,overclock montana43: all,para montana44: all,para montana45: all,para montana46: all,para montana47: all,para montana48: all,para montana49: all,para montana50: all,para montana51: all,para montana52: all,para montana53: all,para montana54: all,para montana55: all,para idaho: single """) return r3 def makeTestG4(): """Create a temporary group file 4 (nD)""" f4 = make_temp_file(""" # rack-x1y1: idaho1z1,idaho2z1 rack-x1y2: idaho2z1,idaho3z1 rack-x2y1: idaho4z1,idaho5z1 rack-x2y2: idaho6z1,idaho7z1 rack-x1: @rack-x1y[1-2] rack-x2: @rack-x2y[1-2] rack-y1: @rack-x[1-2]y1 rack-y2: @rack-x[1-2]y2 rack-all: @rack-x[1-2]y[1-2] """) return f4 class NodeSetGroupTest(unittest.TestCase): def setUp(self): """setUp test reproducibility: change standard group resolver to ensure that no local group source is used during tests""" set_std_group_resolver(GroupResolver()) # dummy resolver def tearDown(self): """tearDown: restore standard group resolver""" set_std_group_resolver(None) # restore std resolver def testGroupResolverSimple(self): """test NodeSet with simple custom GroupResolver""" test_groups1 = 
makeTestG1() source = UpcallGroupSource( "simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups1.name, None) # create custom resolver with default source res = GroupResolver(source) self.assertFalse(res.has_node_groups()) self.assertFalse(res.has_node_groups("dummy_namespace")) nodeset = NodeSet("@gpu", resolver=res) self.assertEqual(nodeset, NodeSet("montana[38-41]")) self.assertEqual(str(nodeset), "montana[38-41]") nodeset = NodeSet("@chassis3", resolver=res) self.assertEqual(str(nodeset), "montana[36-37]") nodeset = NodeSet("@chassis[3-4]", resolver=res) self.assertEqual(str(nodeset), "montana[36-39]") nodeset = NodeSet("@chassis[1,3,5]", resolver=res) self.assertEqual(str(nodeset), "montana[32-33,36-37,40-41]") nodeset = NodeSet("@chassis[2-12/2]", resolver=res) self.assertEqual(str(nodeset), "montana[34-35,38-39,42-43,46-47,50-51,54-55]") nodeset = NodeSet("@chassis[1,3-4,5-11/3]", resolver=res) self.assertEqual(str(nodeset), "montana[32-33,36-41,46-47,52-53]") # test recursive group gpuchassis nodeset1 = NodeSet("@chassis[4-5]", resolver=res) nodeset2 = NodeSet("@gpu", resolver=res) nodeset3 = NodeSet("@gpuchassis", resolver=res) self.assertEqual(nodeset1, nodeset2) self.assertEqual(nodeset2, nodeset3) # test also with some inline operations nodeset = NodeSet("montana3,@gpuchassis!montana39,montana77^montana38", resolver=res) self.assertEqual(str(nodeset), "montana[3,40-41,77]") def testAllNoResolver(self): """test NodeSet.fromall() with no resolver""" self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=RESOLVER_NOGROUP) def testGroupsNoResolver(self): """test NodeSet.groups() with no resolver""" nodeset = NodeSet("foo", resolver=RESOLVER_NOGROUP) self.assertRaises(NodeSetExternalError, nodeset.groups) def testGroupResolverAddSourceError(self): """test GroupResolver.add_source() error""" test_groups1 = makeTestG1() 
source = UpcallGroupSource("simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups1.name, None) res = GroupResolver(source) # adding the same source again should raise ValueError self.assertRaises(ValueError, res.add_source, source) def testGroupResolverMinimal(self): """test NodeSet with minimal GroupResolver""" test_groups1 = makeTestG1() source = UpcallGroupSource("minimal", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name, None, None, None) # create custom resolver with default source res = GroupResolver(source) nodeset = NodeSet("@gpu", resolver=res) self.assertEqual(nodeset, NodeSet("montana[38-41]")) self.assertEqual(str(nodeset), "montana[38-41]") self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=res) def testConfigEmpty(self): """test groups with an empty configuration file""" f = make_temp_file("") res = GroupResolverConfig(f.name) # NodeSet should work nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # without group support self.assertRaises(GroupResolverSourceError, nodeset.regroup) self.assertRaises(GroupResolverSourceError, NodeSet, "@bar", resolver=res) def testConfigResolverEmpty(self): """test groups resolver with an empty file list""" # empty file list OR as if no config file is parsable res = GroupResolverConfig([]) # NodeSet should work nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # without group support self.assertRaises(GroupResolverSourceError, nodeset.regroup) self.assertRaises(GroupResolverSourceError, NodeSet, "@bar", resolver=res) def testConfigBasicLocal(self): """test groups with a basic local config file""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = 
NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(nodeset.groups().keys(), ["@foo"]) self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") # No 'all' defined: all_nodes() should raise an error self.assertRaises(GroupSourceNoUpcall, res.all_nodes) # No 'reverse' defined: node_groups() should raise an error self.assertRaises(GroupSourceNoUpcall, res.node_groups, "example1") # regroup with rest nodeset = NodeSet("example[1-101]", resolver=res) self.assertEqual(nodeset.regroup(), "@foo,example101") # regroup incomplete nodeset = NodeSet("example[50-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[50-200]") # regroup no matching nodeset = NodeSet("example[102-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[102-200]") def testConfigWrongSyntax(self): """test wrong groups config syntax""" f = make_temp_file(""" # A comment [Main] default: local [local] something: echo example[1-100] """) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def testConfigBasicLocalVerbose(self): """test groups with a basic local config file (verbose)""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigBasicLocalAlternative(self): """test groups with a basic local config file (= alternative)""" f = make_temp_file(""" # A comment [Main] default=local [local] map=echo example[1-100] #all= list=echo foo #reverse= """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") 
self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") # @truc? def testConfigBasicEmptyDefault(self): """test groups with a empty default namespace""" f = make_temp_file(""" # A comment [Main] default: [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigBasicNoMain(self): """test groups with a local config without main section""" f = make_temp_file(""" # A comment [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigBasicWrongDefault(self): """test groups with a wrong default namespace""" f = make_temp_file(""" # A comment [Main] default: pointless [local] map: echo example[1-100] #all: list: echo foo #reverse: """) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def testConfigQueryFailed(self): """test groups with config and failed query""" f = make_temp_file(""" # A comment [Main] default: local [local] map: false all: false list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertRaises(NodeSetExternalError, nodeset.regroup) # all_nodes() self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=res) def testConfigQueryFailedReverse(self): """test groups with config and failed query (reverse)""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example1 list: echo foo reverse: false """) res = GroupResolverConfig(f.name) 
nodeset = NodeSet("@foo", resolver=res) self.assertEqual(str(nodeset), "example1") self.assertRaises(NodeSetExternalError, nodeset.regroup) def testConfigRegroupWrongNamespace(self): """test groups by calling regroup(wrong_namespace)""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertRaises(GroupResolverSourceError, nodeset.regroup, "unknown") def testConfigNoListNoReverse(self): """test groups with no list and not reverse upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: #list: #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # not able to regroup, should still return valid nodeset self.assertEqual(nodeset.regroup(), "example[1-100]") def testConfigNoListButReverseQuery(self): """test groups with no list but reverse upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: #list: echo foo reverse: echo foo """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") def testConfigNoMap(self): """test groups with no map upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] #map: echo example[1-100] all: list: echo foo #reverse: echo foo """) # map is a mandatory upcall, an exception should be raised early self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def testConfigWithEmptyList(self): """test groups with list upcall returning nothing""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: : reverse: echo foo """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", 
resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") def testConfigListAllWithAll(self): """test all groups listing with all upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] all: echo foo bar list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-50]", resolver=res) self.assertEqual(str(nodeset), "example[1-50]") self.assertEqual(str(NodeSet.fromall(resolver=res)), "bar,foo") # test "@*" magic group listing nodeset = NodeSet("@*", resolver=res) self.assertEqual(str(nodeset), "bar,foo") nodeset = NodeSet("rab,@*,oof", resolver=res) self.assertEqual(str(nodeset), "bar,foo,oof,rab") # with group source nodeset = NodeSet("@local:*", resolver=res) self.assertEqual(str(nodeset), "bar,foo") nodeset = NodeSet("rab,@local:*,oof", resolver=res) self.assertEqual(str(nodeset), "bar,foo,oof,rab") def testConfigListAllWithoutAll(self): """test all groups listing without all upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo bar #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-50]", resolver=res) self.assertEqual(str(nodeset), "example[1-50]") self.assertEqual(str(NodeSet.fromall(resolver=res)), "example[1-100]") # test "@*" magic group listing nodeset = NodeSet("@*", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") nodeset = NodeSet("@*,example[101-104]", resolver=res) self.assertEqual(str(nodeset), "example[1-104]") nodeset = NodeSet("example[105-149],@*,example[101-104]", resolver=res) self.assertEqual(str(nodeset), "example[1-149]") # with group source nodeset = NodeSet("@local:*", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") nodeset = NodeSet("example0,@local:*,example[101-110]", resolver=res) self.assertEqual(str(nodeset), "example[0-110]") def testConfigListAllNDWithoutAll(self): """test all groups 
listing without all upcall (nD)""" # Even in nD, ensure that $GROUP is a simple group that has been previously expanded f = make_temp_file(""" # A comment [Main] default: local [local] map: if [[ $GROUP == "x1y[3-4]" ]]; then exit 1; elif [[ $GROUP == "x1y1" ]]; then echo rack[1-5]z[1-42]; else echo rack[6-10]z[1-42]; fi #all: list: echo x1y1 x1y2 x1y[3-4] #reverse: """) res = GroupResolverConfig(f.name, illegal_chars=ILLEGAL_GROUP_CHARS) nodeset = NodeSet("rack3z40", resolver=res) self.assertEqual(str(NodeSet.fromall(resolver=res)), "rack[1-10]z[1-42]") self.assertEqual(res.grouplist(), ['x1y1', 'x1y2', 'x1y[3-4]']) # raw self.assertEqual(grouplist(resolver=res), ['x1y1', 'x1y2', 'x1y3', 'x1y4']) # cleaned # test "@*" magic group listing nodeset = NodeSet("@*", resolver=res) self.assertEqual(str(nodeset), "rack[1-10]z[1-42]") # with group source nodeset = NodeSet("@local:*", resolver=res) self.assertEqual(str(nodeset), "rack[1-10]z[1-42]") nodeset = NodeSet("rack11z1,@local:*,rack11z[2-42]", resolver=res) self.assertEqual(str(nodeset), "rack[1-11]z[1-42]") def testConfigIllegalCharsND(self): """test group list containing illegal characters""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo rack[6-10]z[1-42] #all: list: echo x1y1 x1y2 @illegal x1y[3-4] #reverse: """) res = GroupResolverConfig(f.name, illegal_chars=ILLEGAL_GROUP_CHARS) nodeset = NodeSet("rack3z40", resolver=res) self.assertRaises(GroupResolverIllegalCharError, res.grouplist) def testConfigResolverSources(self): """test sources() with groups config of 2 sources""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] [other] map: echo example[1-10] """) res = GroupResolverConfig(f.name) self.assertEqual(len(res.sources()), 2) self.assert_('local' in res.sources()) self.assert_('other' in res.sources()) def testConfigCrossRefs(self): """test groups config with cross references""" f = make_temp_file(""" # A comment [Main] default: other 
[local] map: echo example[1-100] [other] map: echo "foo: @local:foo" | sed -n 's/^$GROUP:\(.*\)/\\1/p' [third] map: echo -e "bar: @ref-rel\\nref-rel: @other:foo\\nref-all: @*" | sed -n 's/^$GROUP:\(.*\)/\\1/p' list: echo bar """) res = GroupResolverConfig(f.name) nodeset = NodeSet("@other:foo", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # @third:bar -> @ref-rel (third) -> @other:foo -> @local:foo -> nodes nodeset = NodeSet("@third:bar", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") nodeset = NodeSet("@third:ref-all", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") def testConfigGroupsDirDummy(self): """test groups with groupsdir defined (dummy)""" f = make_temp_file(""" [Main] default: local groupsdir: /path/to/nowhere [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigGroupsDirExists(self): """test groups with groupsdir defined (real, other)""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: new_local groupsdir: %s [local] map: echo example[1-100] #all: list: echo foo #reverse: """ % dname) f2 = make_temp_file(""" [new_local] map: echo example[1-100] #all: list: echo bar #reverse: """, suffix=".conf", dir=dname) try: res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@bar") self.assertEqual(str(NodeSet("@bar", resolver=res)), "example[1-100]") finally: f2.close() f.close() shutil.rmtree(dname, ignore_errors=True) def testConfigGroupsMultipleDirs(self): """test groups with multiple confdir defined""" dname1 = make_temp_dir() dname2 = make_temp_dir() # Notes: # - use dname1 two times to check dup checking 
code # - use quotes on one of the directory path f = make_temp_file(""" [Main] default: local2 confdir: "%s" %s %s [local] map: echo example[1-100] list: echo foo """ % (dname1, dname2, dname1)) fs1 = make_temp_file(""" [local1] map: echo loc1node[1-100] list: echo bar """, suffix=".conf", dir=dname1) fs2 = make_temp_file(""" [local2] map: echo loc2node[02-50] list: echo toto """, suffix=".conf", dir=dname2) try: res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # local self.assertEqual(nodeset.regroup("local"), "@local:foo") self.assertEqual(str(NodeSet("@local:foo", resolver=res)), "example[1-100]") # local1 nodeset = NodeSet("loc1node[1-100]", resolver=res) self.assertEqual(nodeset.regroup("local1"), "@local1:bar") self.assertEqual(str(NodeSet("@local1:bar", resolver=res)), "loc1node[1-100]") # local2 nodeset = NodeSet("loc2node[02-50]", resolver=res) self.assertEqual(nodeset.regroup(), "@toto") # default group source self.assertEqual(str(NodeSet("@toto", resolver=res)), "loc2node[02-50]") finally: fs2.close() fs1.close() f.close() shutil.rmtree(dname2, ignore_errors=True) shutil.rmtree(dname1, ignore_errors=True) def testConfigGroupsDirDupConfig(self): """test groups with duplicate in groupsdir""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: iamdup groupsdir: %s [local] map: echo example[1-100] #all: list: echo foo #reverse: """ % dname) f2 = make_temp_file(""" [iamdup] map: echo example[1-100] #all: list: echo bar #reverse: """, suffix=".conf", dir=dname) f3 = make_temp_file(""" [iamdup] map: echo example[10-200] #all: list: echo patato #reverse: """, suffix=".conf", dir=dname) try: self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) finally: f3.close() f2.close() f.close() shutil.rmtree(dname, ignore_errors=True) def testConfigGroupsDirExistsNoOther(self): """test groups with groupsdir defined (real, no other)""" dname1 = make_temp_dir() 
dname2 = make_temp_dir() f = make_temp_file(""" [Main] default: new_local groupsdir: %s %s """ % (dname1, dname2)) f2 = make_temp_file(""" [new_local] map: echo example[1-100] #all: list: echo bar #reverse: """, suffix=".conf", dir=dname2) try: res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@bar") self.assertEqual(str(NodeSet("@bar", resolver=res)), "example[1-100]") finally: f2.close() f.close() shutil.rmtree(dname1, ignore_errors=True) shutil.rmtree(dname2, ignore_errors=True) def testConfigGroupsDirNotADirectory(self): """test groups with groupsdir defined (not a directory)""" dname = make_temp_dir() fdummy = make_temp_file("wrong") f = make_temp_file(""" [Main] default: new_local groupsdir: %s """ % fdummy.name) try: self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) finally: fdummy.close() f.close() shutil.rmtree(dname, ignore_errors=True) def testConfigIllegalChars(self): """test groups with illegal characters""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo 'foo *' reverse: echo f^oo """) res = GroupResolverConfig(f.name, illegal_chars=set("@,&!&^*")) nodeset = NodeSet("example[1-100]", resolver=res) self.assertRaises(GroupResolverIllegalCharError, nodeset.groups) self.assertRaises(GroupResolverIllegalCharError, nodeset.regroup) def testConfigMaxRecursionError(self): """test groups maximum recursion depth exceeded error""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo @deep list: echo deep """) res = GroupResolverConfig(f.name) self.assertRaises(NodeSetParseError, NodeSet, "@deep", resolver=res) def testGroupResolverND(self): """test NodeSet with simple custom GroupResolver (nD)""" test_groups4 = makeTestG4() source = UpcallGroupSource("simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups4.name, "sed -n 
's/^all:\(.*\)/\\1/p' %s" % test_groups4.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups4.name, None) # create custom resolver with default source res = GroupResolver(source) self.assertFalse(res.has_node_groups()) self.assertFalse(res.has_node_groups("dummy_namespace")) nodeset = NodeSet("@rack-x1y2", resolver=res) self.assertEqual(nodeset, NodeSet("idaho[2-3]z1")) self.assertEqual(str(nodeset), "idaho[2-3]z1") nodeset = NodeSet("@rack-y1", resolver=res) self.assertEqual(str(nodeset), "idaho[1-2,4-5]z1") nodeset = NodeSet("@rack-all", resolver=res) self.assertEqual(str(nodeset), "idaho[1-7]z1") # test NESTED nD groups() self.assertEqual(sorted(nodeset.groups().keys()), ['@rack-all', '@rack-x1', '@rack-x1y1', '@rack-x1y2', '@rack-x2', '@rack-x2y1', '@rack-x2y2', '@rack-y1', '@rack-y2']) self.assertEqual(sorted(nodeset.groups(groupsource="simple").keys()), ['@simple:rack-all', '@simple:rack-x1', '@simple:rack-x1y1', '@simple:rack-x1y2', '@simple:rack-x2', '@simple:rack-x2y1', '@simple:rack-x2y2', '@simple:rack-y1', '@simple:rack-y2']) self.assertEqual(sorted(nodeset.groups(groupsource="simple", noprefix=True).keys()), ['@rack-all', '@rack-x1', '@rack-x1y1', '@rack-x1y2', '@rack-x2', '@rack-x2y1', '@rack-x2y2', '@rack-y1', '@rack-y2']) testns = NodeSet() for gnodes, inodes in nodeset.groups().itervalues(): testns.update(inodes) self.assertEqual(testns, nodeset) # more tests with nested groups nodeset = NodeSet("idaho5z1", resolver=res) self.assertEqual(sorted(nodeset.groups().keys()), ['@rack-all', '@rack-x2', '@rack-x2y1', '@rack-y1']) nodeset = NodeSet("idaho5z1,idaho4z1", resolver=res) self.assertEqual(sorted(nodeset.groups().keys()), ['@rack-all', '@rack-x2', '@rack-x2y1', '@rack-y1']) nodeset = NodeSet("idaho5z1,idaho7z1", resolver=res) self.assertEqual(sorted(nodeset.groups().keys()), ['@rack-all', '@rack-x2', '@rack-x2y1', '@rack-x2y2', '@rack-y1', '@rack-y2']) def testConfigCFGDIR(self): """test groups with $CFGDIR use in upcalls""" f = 
make_temp_file(""" [Main] default: local [local] map: echo example[1-100] list: basename $CFGDIR """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) # just a trick to check $CFGDIR resolution... tmpgroup = os.path.basename(os.path.dirname(f.name)) self.assertEqual(nodeset.groups().keys(), ['@%s' % tmpgroup]) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@%s" % tmpgroup) self.assertEqual(str(NodeSet("@%s" % tmpgroup, resolver=res)), "example[1-100]") def test_fromall_grouplist(self): """test NodeSet.fromall() without all upcall""" # Group Source that has no all upcall and that can handle special char test_groups2 = makeTestG2() source = UpcallGroupSource("simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups2.name, None, "sed -n 's/^\([0-9A-Za-z_-\%%]*\):.*/\\1/p' %s" % test_groups2.name, None) res = GroupResolver(source) # fromall will trigger ParserEngine.grouplist() that we want to test here nsall = NodeSet.fromall(resolver=res) # if working, group resolution worked with % char self.assertEqual(str(NodeSet.fromall(resolver=res)), "montana[32-55,87-90]") self.assertEqual(len(nsall), 28) # btw explicitly check escaped char nsesc = NodeSet('@escape%test', resolver=res) self.assertEqual(str(nsesc), 'montana[87-90]') self.assertEqual(len(nsesc), 4) nsesc2 = NodeSet('@esc%test2', resolver=res) self.assertEqual(nsesc, nsesc2) ns = NodeSet('montana[87-90]', resolver=res) # could also result in escape%test? 
self.assertEqual(ns.regroup(), '@esc%test2') class NodeSetGroup2GSTest(unittest.TestCase): def setUp(self): """configure simple RESOLVER_STD_GROUP""" # create temporary groups file and keep a reference to avoid file closing self.test_groups1 = makeTestG1() self.test_groups2 = makeTestG2() # create 2 GroupSource objects default = UpcallGroupSource("default", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % self.test_groups1.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % self.test_groups1.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % self.test_groups1.name, None) source2 = UpcallGroupSource("source2", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % self.test_groups2.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % self.test_groups2.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % self.test_groups2.name, None) resolver = GroupResolver(default) resolver.add_source(source2) set_std_group_resolver(resolver) def tearDown(self): """restore default RESOLVER_STD_GROUP""" set_std_group_resolver(None) del self.test_groups1 del self.test_groups2 def testGroupSyntaxes(self): """test NodeSet group operation syntaxes""" nodeset = NodeSet("@gpu") self.assertEqual(str(nodeset), "montana[38-41]") nodeset = NodeSet("@chassis[1-3,5]&@chassis[2-3]") self.assertEqual(str(nodeset), "montana[34-37]") nodeset1 = NodeSet("@io!@mds") nodeset2 = NodeSet("@oss") self.assertEqual(str(nodeset1), str(nodeset2)) self.assertEqual(str(nodeset1), "montana[4-5]") def testGroupListDefault(self): """test NodeSet group listing GroupResolver.grouplist()""" groups = std_group_resolver().grouplist() self.assertEqual(len(groups), 20) helper_groups = grouplist() self.assertEqual(len(helper_groups), 20) total = 0 nodes = NodeSet() for group in groups: ns = NodeSet("@%s" % group) total += len(ns) nodes.update(ns) self.assertEqual(total, 310) all_nodes = NodeSet.fromall() self.assertEqual(len(all_nodes), len(nodes)) self.assertEqual(all_nodes, nodes) def testGroupListSource2(self): """test NodeSet group listing 
GroupResolver.grouplist(source)""" groups = std_group_resolver().grouplist("source2") self.assertEqual(len(groups), 2) total = 0 for group in groups: total += len(NodeSet("@source2:%s" % group)) self.assertEqual(total, 24) def testGroupNoPrefix(self): """test NodeSet group noprefix option""" nodeset = NodeSet("montana[32-37,42-55]") self.assertEqual(nodeset.regroup("source2"), "@source2:para") self.assertEqual(nodeset.regroup("source2", noprefix=True), "@para") def testGroupGroups(self): """test NodeSet.groups()""" nodeset = NodeSet("montana[32-37,42-55]") self.assertEqual(sorted(nodeset.groups().keys()), ['@all', '@chassis1', '@chassis10', '@chassis11', '@chassis12', '@chassis2', '@chassis3', '@chassis6', '@chassis7', '@chassis8', '@chassis9', '@compute']) testns = NodeSet() for gnodes, inodes in nodeset.groups().itervalues(): testns.update(inodes) self.assertEqual(testns, nodeset) class NodeSetRegroupTest(unittest.TestCase): def setUp(self): """setUp test reproducibility: change standard group resolver to ensure that no local group source is used during tests""" set_std_group_resolver(GroupResolver()) # dummy resolver def tearDown(self): """tearDown: restore standard group resolver""" set_std_group_resolver(None) # restore std resolver def testGroupResolverReverse(self): """test NodeSet GroupResolver with reverse upcall""" test_groups3 = makeTestG3() test_reverse3 = makeTestR3() source = UpcallGroupSource("test", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups3.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups3.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups3.name, "awk -F: '/^$NODE:/ { gsub(\",\",\"\\n\",$2); print $2 }' %s" % test_reverse3.name) # create custom resolver with default source res = GroupResolver(source) nodeset = NodeSet("@all", resolver=res) self.assertEqual(nodeset, NodeSet("montana[32-55]")) self.assertEqual(str(nodeset), "montana[32-55]") self.assertEqual(nodeset.regroup(), "@all") self.assertEqual(nodeset.regroup(), 
"@all") nodeset = NodeSet("@overclock", resolver=res) self.assertEqual(nodeset, NodeSet("montana[41-42]")) self.assertEqual(str(nodeset), "montana[41-42]") self.assertEqual(nodeset.regroup(), "@overclock") self.assertEqual(nodeset.regroup(), "@overclock") nodeset = NodeSet("@gpu,@overclock", resolver=res) self.assertEqual(str(nodeset), "montana[38-42]") self.assertEqual(nodeset, NodeSet("montana[38-42]")) # un-overlap :) self.assertEqual(nodeset.regroup(), "@gpu,montana42") self.assertEqual(nodeset.regroup(), "@gpu,montana42") self.assertEqual(nodeset.regroup(overlap=True), "@gpu,@overclock") nodeset = NodeSet("montana41", resolver=res) self.assertEqual(nodeset.regroup(), "montana41") self.assertEqual(nodeset.regroup(), "montana41") # test regroup code when using unindexed node nodeset = NodeSet("idaho", resolver=res) self.assertEqual(nodeset.regroup(), "@single") self.assertEqual(nodeset.regroup(), "@single") nodeset = NodeSet("@single", resolver=res) self.assertEqual(str(nodeset), "idaho") # unresolved unindexed: nodeset = NodeSet("utah", resolver=res) self.assertEqual(nodeset.regroup(), "utah") self.assertEqual(nodeset.regroup(), "utah") nodeset = NodeSet("@all!montana38", resolver=res) self.assertEqual(nodeset, NodeSet("montana[32-37,39-55]")) self.assertEqual(str(nodeset), "montana[32-37,39-55]") self.assertEqual(nodeset.regroup(), "@para,montana[39-41]") self.assertEqual(nodeset.regroup(), "@para,montana[39-41]") self.assertEqual(nodeset.regroup(overlap=True), "@chassis[1-3],@login,@overclock,@para,montana[39-40]") self.assertEqual(nodeset.regroup(overlap=True), "@chassis[1-3],@login,@overclock,@para,montana[39-40]") nodeset = NodeSet("montana[32-37]", resolver=res) self.assertEqual(nodeset.regroup(), "@chassis[1-3]") self.assertEqual(nodeset.regroup(), "@chassis[1-3]") class StaticGroupSource(UpcallGroupSource): """ A memory only group source based on a provided dict. 
""" def __init__(self, name, data): all_upcall = None if 'all' in data: all_upcall = 'fake_all' list_upcall = None if 'list' in data: list_upcall = 'fake_list' UpcallGroupSource.__init__(self, name, "fake_map", all_upcall, list_upcall) self._data = data def _upcall_read(self, cmdtpl, args=dict()): if cmdtpl == 'map': return self._data[cmdtpl].get(args['GROUP']) elif cmdtpl == 'reverse': return self._data[cmdtpl].get(args['NODE']) else: return self._data[cmdtpl] class GroupSourceCacheTest(unittest.TestCase): def test_clear_cache(self): """test GroupSource.clear_cache()""" source = StaticGroupSource('cache', {'map': {'a': 'foo1', 'b': 'foo2'} }) # create custom resolver with default source res = GroupResolver(source) # Populate map cache self.assertEqual("foo1", str(NodeSet("@a", resolver=res))) self.assertEqual("foo2", str(NodeSet("@b", resolver=res))) self.assertEqual(len(source._cache['map']), 2) # Clear cache source.clear_cache() self.assertEqual(len(source._cache['map']), 0) def test_expired_cache(self): """test UpcallGroupSource cache entries expired according to config""" # create custom resolver with default source source = StaticGroupSource('cache', {'map': {'a': 'foo1', 'b': 'foo2'} }) source.cache_time = 0.2 res = GroupResolver(source) # Populate map cache self.assertEqual("foo1", str(NodeSet("@a", resolver=res))) self.assertEqual("foo2", str(NodeSet("@b", resolver=res))) self.assertEqual(len(source._cache['map']), 2) # Be sure 0.2 cache time is expired (especially for old Python version) time.sleep(0.25) source._data['map']['a'] = 'something_else' self.assertEqual('something_else', str(NodeSet("@a", resolver=res))) def test_config_cache_time(self): """test group config cache_time options""" f = make_temp_file(""" [local] cache_time: 0.2 map: echo foo1 """) res = GroupResolverConfig(f.name) self.assertEqual(res._sources['local'].cache_time, 0.2) self.assertEqual("foo1", str(NodeSet("@local:foo", resolver=res))) class GroupSourceTest(unittest.TestCase): 
"""Test class for 1.7 dict-based GroupSource""" def test_base_class0(self): """test base GroupSource class (empty)""" gs = GroupSource("emptysrc") self.assertEqual(gs.resolv_map('gr1'), '') self.assertEqual(gs.resolv_map('gr2'), '') self.assertEqual(gs.resolv_list(), []) self.assertRaises(GroupSourceNoUpcall, gs.resolv_all) self.assertRaises(GroupSourceNoUpcall, gs.resolv_reverse, 'n4') def test_base_class1(self): """test base GroupSource class (map and list)""" gs = GroupSource("testsrc", { 'gr1': ['n1', 'n4', 'n3', 'n2'], 'gr2': ['n9', 'n4'] }) self.assertEqual(gs.resolv_map('gr1'), ['n1', 'n4', 'n3', 'n2']) self.assertEqual(gs.resolv_map('gr2'), ['n9', 'n4']) self.assertEqual(sorted(gs.resolv_list()), ['gr1', 'gr2']) self.assertRaises(GroupSourceNoUpcall, gs.resolv_all) self.assertRaises(GroupSourceNoUpcall, gs.resolv_reverse, 'n4') def test_base_class2(self): """test base GroupSource class (all)""" gs = GroupSource("testsrc", { 'gr1': ['n1', 'n4', 'n3', 'n2'], 'gr2': ['n9', 'n4'] }, 'n[1-9]') self.assertEqual(gs.resolv_all(), 'n[1-9]') class YAMLGroupLoaderTest(unittest.TestCase): def test_missing_pyyaml(self): """test YAMLGroupLoader with missing PyYAML""" sys_path_saved = sys.path try: sys.path = [] # make import yaml failed if 'yaml' in sys.modules: # forget about previous yaml import del sys.modules['yaml'] f = make_temp_file(""" vendors: apricot: node""") self.assertRaises(GroupResolverConfigError, YAMLGroupLoader, f.name) finally: sys.path = sys_path_saved def test_one_source(self): """test YAMLGroupLoader one source""" f = make_temp_file(""" vendors: apricot: node""") loader = YAMLGroupLoader(f.name) sources = list(loader) self.assertEqual(len(sources), 1) self.assertEqual(loader.groups("vendors"), { 'apricot': 'node' }) def test_multi_sources(self): """test YAMLGroupLoader multi sources""" f = make_temp_file(""" vendors: apricot: node customers: cherry: client-4-2""") loader = YAMLGroupLoader(f.name) sources = list(loader) self.assertEqual(len(sources), 
2) self.assertEqual(loader.groups("vendors"), { 'apricot': 'node' }) self.assertEqual(loader.groups("customers"), { 'cherry': 'client-4-2' }) def test_reload(self): """test YAMLGroupLoader cache_time""" f = make_temp_file(""" vendors: apricot: "node[1-10]" avocado: 'node[11-20]' banana: node[21-30] customers: cherry: client-4-2""") loader = YAMLGroupLoader(f.name, cache_time=1) self.assertEqual(loader.groups("vendors"), { 'apricot': 'node[1-10]', 'avocado': 'node[11-20]', 'banana': 'node[21-30]' }) # modify YAML file and check that it is reloaded after cache_time f.write("\n nut: node42\n") # oh and BTW for ultimate code coverage, test if we add a new source # on-the-fly, this is not supported but should be ignored f.write("thieves:\n pomegranate: node100\n") f.flush() time.sleep(0.1) # too soon self.assertEqual(loader.groups("customers"), { 'cherry': 'client-4-2' }) time.sleep(1.0) self.assertEqual(loader.groups("vendors"), { 'apricot': 'node[1-10]', 'avocado': 'node[11-20]', 'banana': 'node[21-30]' }) self.assertEqual(loader.groups("customers"), { 'cherry': 'client-4-2', 'nut': 'node42' }) def test_iter(self): """test YAMLGroupLoader iterator""" f = make_temp_file(""" src1: src1grp1: node11 src1grp2: node12 src2: src2grp1: node21 src2grp2: node22 src3: src3grp1: node31 src3grp2: node32""") loader = YAMLGroupLoader(f.name, cache_time = 0.1) # iterate sources with cache expired for source in loader: time.sleep(0.5) # force reload self.assertEqual(len(source.groups), 2) class GroupResolverYAMLTest(unittest.TestCase): def setUp(self): """setUp test reproducibility: change standard group resolver to ensure that no local group source is used during tests""" set_std_group_resolver(GroupResolver()) # dummy resolver def tearDown(self): """tearDown: restore standard group resolver""" set_std_group_resolver(None) # restore std resolver def test_yaml_basic(self): """test groups with a basic YAML config file""" dname = make_temp_dir() f = make_temp_file(""" # A comment [Main] 
default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" yaml: foo: example[1-4,91-100],example90 bar: example[5-89] """, suffix=".yaml", dir=dname) res = GroupResolverConfig(f.name) # Group resolution nodeset = NodeSet("@foo", resolver=res) self.assertEqual(str(nodeset), "example[1-4,90-100]") nodeset = NodeSet("@bar", resolver=res) self.assertEqual(str(nodeset), "example[5-89]") nodeset = NodeSet("@foo,@bar", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") nodeset = NodeSet("@unknown", resolver=res) self.assertEqual(len(nodeset), 0) # Regroup nodeset = NodeSet("example[1-4,90-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-4,90-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(nodeset.groups().keys(), ["@foo"]) self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-4,90-100]") # No 'all' defined: all_nodes() should raise an error self.assertRaises(GroupSourceError, res.all_nodes) # but then NodeSet falls back to the union of all groups nodeset = NodeSet.fromall(resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # regroup doesn't use @all in that case self.assertEqual(nodeset.regroup(), "@bar,@foo") # No 'reverse' defined: node_groups() should raise an error self.assertRaises(GroupSourceError, res.node_groups, "example1") # regroup with rest nodeset = NodeSet("example[1-101]", resolver=res) self.assertEqual(nodeset.regroup(), "@bar,@foo,example101") # regroup incomplete nodeset = NodeSet("example[50-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[50-200]") # regroup no matching nodeset = NodeSet("example[102-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[102-200]") def test_yaml_fromall(self): """test groups special all group""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" yaml: foo: example[1-4,91-100],example90 bar: example[5-89] all: example[90-100] """, 
suffix=".yaml", dir=dname) res = GroupResolverConfig(f.name) nodeset = NodeSet.fromall(resolver=res) self.assertEqual(str(nodeset), "example[90-100]") # regroup uses @all if it is defined self.assertEqual(nodeset.regroup(), "@all") def test_yaml_invalid_groups_not_dict(self): """test groups with an invalid YAML config file (1)""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" yaml: bar """, suffix=".yaml", dir=dname) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def test_yaml_invalid_root_dict(self): """test groups with an invalid YAML config file (2)""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" - Casablanca - North by Northwest - The Man Who Wasn't There """, suffix=".yaml", dir=dname) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def test_yaml_invalid_not_yaml(self): """test groups with an invalid YAML config file (3)""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" [Dummy] one: un two: deux three: trois """, suffix=".yaml", dir=dname) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def test_wrong_autodir(self): """test wrong autodir (doesn't exist)""" f = make_temp_file(""" [Main] autodir: /i/do/not/=exist= default: local """) # absent autodir itself doesn't raise any exception, but default # pointing to nothing does... self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def test_wrong_autodir_is_file(self): """test wrong autodir (is a file)""" fe = make_temp_file("") f = make_temp_file(""" [Main] autodir: %s default: local [local] map: node """ % fe.name) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
<filename>tests/admin/clush-tests/NodeSetGroupTest.py #!/usr/bin/env python # ClusterShell.Node* test suite """Unit test for NodeSet with Group support""" import copy import shutil import sys import unittest sys.path.insert(0, '../lib') from TLib import * # Wildcard import for testing purpose from ClusterShell.NodeSet import * from ClusterShell.NodeUtils import * def makeTestG1(): """Create a temporary group file 1""" f1 = make_temp_file(""" # oss: montana5,montana4 mds: montana6 io: montana[4-6] #42: montana3 compute: montana[32-163] chassis1: montana[32-33] chassis2: montana[34-35] chassis3: montana[36-37] chassis4: montana[38-39] chassis5: montana[40-41] chassis6: montana[42-43] chassis7: montana[44-45] chassis8: montana[46-47] chassis9: montana[48-49] chassis10: montana[50-51] chassis11: montana[52-53] chassis12: montana[54-55] Uppercase: montana[1-2] gpuchassis: @chassis[4-5] gpu: montana[38-41] all: montana[1-6,32-163] """) # /!\ Need to return file object and not f1.name, otherwise the temporary # file might be immediately unlinked. 
return f1 def makeTestG2(): """Create a temporary group file 2""" f2 = make_temp_file(""" # # para: montana[32-37,42-55] gpu: montana[38-41] escape%test: montana[87-90] esc%test2: @escape%test """) return f2 def makeTestG3(): """Create a temporary group file 3""" f3 = make_temp_file(""" # # all: montana[32-55] para: montana[32-37,42-55] gpu: montana[38-41] login: montana[32-33] overclock: montana[41-42] chassis1: montana[32-33] chassis2: montana[34-35] chassis3: montana[36-37] single: idaho """) return f3 def makeTestR3(): """Create a temporary reverse group file 3""" r3 = make_temp_file(""" # # montana32: all,para,login,chassis1 montana33: all,para,login,chassis1 montana34: all,para,chassis2 montana35: all,para,chassis2 montana36: all,para,chassis3 montana37: all,para,chassis3 montana38: all,gpu montana39: all,gpu montana40: all,gpu montana41: all,gpu,overclock montana42: all,para,overclock montana43: all,para montana44: all,para montana45: all,para montana46: all,para montana47: all,para montana48: all,para montana49: all,para montana50: all,para montana51: all,para montana52: all,para montana53: all,para montana54: all,para montana55: all,para idaho: single """) return r3 def makeTestG4(): """Create a temporary group file 4 (nD)""" f4 = make_temp_file(""" # rack-x1y1: idaho1z1,idaho2z1 rack-x1y2: idaho2z1,idaho3z1 rack-x2y1: idaho4z1,idaho5z1 rack-x2y2: idaho6z1,idaho7z1 rack-x1: @rack-x1y[1-2] rack-x2: @rack-x2y[1-2] rack-y1: @rack-x[1-2]y1 rack-y2: @rack-x[1-2]y2 rack-all: @rack-x[1-2]y[1-2] """) return f4 class NodeSetGroupTest(unittest.TestCase): def setUp(self): """setUp test reproducibility: change standard group resolver to ensure that no local group source is used during tests""" set_std_group_resolver(GroupResolver()) # dummy resolver def tearDown(self): """tearDown: restore standard group resolver""" set_std_group_resolver(None) # restore std resolver def testGroupResolverSimple(self): """test NodeSet with simple custom GroupResolver""" test_groups1 = 
makeTestG1() source = UpcallGroupSource( "simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups1.name, None) # create custom resolver with default source res = GroupResolver(source) self.assertFalse(res.has_node_groups()) self.assertFalse(res.has_node_groups("dummy_namespace")) nodeset = NodeSet("@gpu", resolver=res) self.assertEqual(nodeset, NodeSet("montana[38-41]")) self.assertEqual(str(nodeset), "montana[38-41]") nodeset = NodeSet("@chassis3", resolver=res) self.assertEqual(str(nodeset), "montana[36-37]") nodeset = NodeSet("@chassis[3-4]", resolver=res) self.assertEqual(str(nodeset), "montana[36-39]") nodeset = NodeSet("@chassis[1,3,5]", resolver=res) self.assertEqual(str(nodeset), "montana[32-33,36-37,40-41]") nodeset = NodeSet("@chassis[2-12/2]", resolver=res) self.assertEqual(str(nodeset), "montana[34-35,38-39,42-43,46-47,50-51,54-55]") nodeset = NodeSet("@chassis[1,3-4,5-11/3]", resolver=res) self.assertEqual(str(nodeset), "montana[32-33,36-41,46-47,52-53]") # test recursive group gpuchassis nodeset1 = NodeSet("@chassis[4-5]", resolver=res) nodeset2 = NodeSet("@gpu", resolver=res) nodeset3 = NodeSet("@gpuchassis", resolver=res) self.assertEqual(nodeset1, nodeset2) self.assertEqual(nodeset2, nodeset3) # test also with some inline operations nodeset = NodeSet("montana3,@gpuchassis!montana39,montana77^montana38", resolver=res) self.assertEqual(str(nodeset), "montana[3,40-41,77]") def testAllNoResolver(self): """test NodeSet.fromall() with no resolver""" self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=RESOLVER_NOGROUP) def testGroupsNoResolver(self): """test NodeSet.groups() with no resolver""" nodeset = NodeSet("foo", resolver=RESOLVER_NOGROUP) self.assertRaises(NodeSetExternalError, nodeset.groups) def testGroupResolverAddSourceError(self): """test GroupResolver.add_source() error""" test_groups1 = makeTestG1() 
source = UpcallGroupSource("simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups1.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups1.name, None) res = GroupResolver(source) # adding the same source again should raise ValueError self.assertRaises(ValueError, res.add_source, source) def testGroupResolverMinimal(self): """test NodeSet with minimal GroupResolver""" test_groups1 = makeTestG1() source = UpcallGroupSource("minimal", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name, None, None, None) # create custom resolver with default source res = GroupResolver(source) nodeset = NodeSet("@gpu", resolver=res) self.assertEqual(nodeset, NodeSet("montana[38-41]")) self.assertEqual(str(nodeset), "montana[38-41]") self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=res) def testConfigEmpty(self): """test groups with an empty configuration file""" f = make_temp_file("") res = GroupResolverConfig(f.name) # NodeSet should work nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # without group support self.assertRaises(GroupResolverSourceError, nodeset.regroup) self.assertRaises(GroupResolverSourceError, NodeSet, "@bar", resolver=res) def testConfigResolverEmpty(self): """test groups resolver with an empty file list""" # empty file list OR as if no config file is parsable res = GroupResolverConfig([]) # NodeSet should work nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # without group support self.assertRaises(GroupResolverSourceError, nodeset.regroup) self.assertRaises(GroupResolverSourceError, NodeSet, "@bar", resolver=res) def testConfigBasicLocal(self): """test groups with a basic local config file""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = 
NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(nodeset.groups().keys(), ["@foo"]) self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") # No 'all' defined: all_nodes() should raise an error self.assertRaises(GroupSourceNoUpcall, res.all_nodes) # No 'reverse' defined: node_groups() should raise an error self.assertRaises(GroupSourceNoUpcall, res.node_groups, "example1") # regroup with rest nodeset = NodeSet("example[1-101]", resolver=res) self.assertEqual(nodeset.regroup(), "@foo,example101") # regroup incomplete nodeset = NodeSet("example[50-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[50-200]") # regroup no matching nodeset = NodeSet("example[102-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[102-200]") def testConfigWrongSyntax(self): """test wrong groups config syntax""" f = make_temp_file(""" # A comment [Main] default: local [local] something: echo example[1-100] """) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def testConfigBasicLocalVerbose(self): """test groups with a basic local config file (verbose)""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigBasicLocalAlternative(self): """test groups with a basic local config file (= alternative)""" f = make_temp_file(""" # A comment [Main] default=local [local] map=echo example[1-100] #all= list=echo foo #reverse= """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") 
self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") # @truc? def testConfigBasicEmptyDefault(self): """test groups with a empty default namespace""" f = make_temp_file(""" # A comment [Main] default: [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigBasicNoMain(self): """test groups with a local config without main section""" f = make_temp_file(""" # A comment [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigBasicWrongDefault(self): """test groups with a wrong default namespace""" f = make_temp_file(""" # A comment [Main] default: pointless [local] map: echo example[1-100] #all: list: echo foo #reverse: """) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def testConfigQueryFailed(self): """test groups with config and failed query""" f = make_temp_file(""" # A comment [Main] default: local [local] map: false all: false list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertRaises(NodeSetExternalError, nodeset.regroup) # all_nodes() self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=res) def testConfigQueryFailedReverse(self): """test groups with config and failed query (reverse)""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example1 list: echo foo reverse: false """) res = GroupResolverConfig(f.name) 
nodeset = NodeSet("@foo", resolver=res) self.assertEqual(str(nodeset), "example1") self.assertRaises(NodeSetExternalError, nodeset.regroup) def testConfigRegroupWrongNamespace(self): """test groups by calling regroup(wrong_namespace)""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertRaises(GroupResolverSourceError, nodeset.regroup, "unknown") def testConfigNoListNoReverse(self): """test groups with no list and not reverse upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: #list: #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # not able to regroup, should still return valid nodeset self.assertEqual(nodeset.regroup(), "example[1-100]") def testConfigNoListButReverseQuery(self): """test groups with no list but reverse upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: #list: echo foo reverse: echo foo """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") def testConfigNoMap(self): """test groups with no map upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] #map: echo example[1-100] all: list: echo foo #reverse: echo foo """) # map is a mandatory upcall, an exception should be raised early self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def testConfigWithEmptyList(self): """test groups with list upcall returning nothing""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: : reverse: echo foo """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", 
resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") def testConfigListAllWithAll(self): """test all groups listing with all upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] all: echo foo bar list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-50]", resolver=res) self.assertEqual(str(nodeset), "example[1-50]") self.assertEqual(str(NodeSet.fromall(resolver=res)), "bar,foo") # test "@*" magic group listing nodeset = NodeSet("@*", resolver=res) self.assertEqual(str(nodeset), "bar,foo") nodeset = NodeSet("rab,@*,oof", resolver=res) self.assertEqual(str(nodeset), "bar,foo,oof,rab") # with group source nodeset = NodeSet("@local:*", resolver=res) self.assertEqual(str(nodeset), "bar,foo") nodeset = NodeSet("rab,@local:*,oof", resolver=res) self.assertEqual(str(nodeset), "bar,foo,oof,rab") def testConfigListAllWithoutAll(self): """test all groups listing without all upcall""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo bar #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-50]", resolver=res) self.assertEqual(str(nodeset), "example[1-50]") self.assertEqual(str(NodeSet.fromall(resolver=res)), "example[1-100]") # test "@*" magic group listing nodeset = NodeSet("@*", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") nodeset = NodeSet("@*,example[101-104]", resolver=res) self.assertEqual(str(nodeset), "example[1-104]") nodeset = NodeSet("example[105-149],@*,example[101-104]", resolver=res) self.assertEqual(str(nodeset), "example[1-149]") # with group source nodeset = NodeSet("@local:*", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") nodeset = NodeSet("example0,@local:*,example[101-110]", resolver=res) self.assertEqual(str(nodeset), "example[0-110]") def testConfigListAllNDWithoutAll(self): """test all groups 
listing without all upcall (nD)""" # Even in nD, ensure that $GROUP is a simple group that has been previously expanded f = make_temp_file(""" # A comment [Main] default: local [local] map: if [[ $GROUP == "x1y[3-4]" ]]; then exit 1; elif [[ $GROUP == "x1y1" ]]; then echo rack[1-5]z[1-42]; else echo rack[6-10]z[1-42]; fi #all: list: echo x1y1 x1y2 x1y[3-4] #reverse: """) res = GroupResolverConfig(f.name, illegal_chars=ILLEGAL_GROUP_CHARS) nodeset = NodeSet("rack3z40", resolver=res) self.assertEqual(str(NodeSet.fromall(resolver=res)), "rack[1-10]z[1-42]") self.assertEqual(res.grouplist(), ['x1y1', 'x1y2', 'x1y[3-4]']) # raw self.assertEqual(grouplist(resolver=res), ['x1y1', 'x1y2', 'x1y3', 'x1y4']) # cleaned # test "@*" magic group listing nodeset = NodeSet("@*", resolver=res) self.assertEqual(str(nodeset), "rack[1-10]z[1-42]") # with group source nodeset = NodeSet("@local:*", resolver=res) self.assertEqual(str(nodeset), "rack[1-10]z[1-42]") nodeset = NodeSet("rack11z1,@local:*,rack11z[2-42]", resolver=res) self.assertEqual(str(nodeset), "rack[1-11]z[1-42]") def testConfigIllegalCharsND(self): """test group list containing illegal characters""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo rack[6-10]z[1-42] #all: list: echo x1y1 x1y2 @illegal x1y[3-4] #reverse: """) res = GroupResolverConfig(f.name, illegal_chars=ILLEGAL_GROUP_CHARS) nodeset = NodeSet("rack3z40", resolver=res) self.assertRaises(GroupResolverIllegalCharError, res.grouplist) def testConfigResolverSources(self): """test sources() with groups config of 2 sources""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] [other] map: echo example[1-10] """) res = GroupResolverConfig(f.name) self.assertEqual(len(res.sources()), 2) self.assert_('local' in res.sources()) self.assert_('other' in res.sources()) def testConfigCrossRefs(self): """test groups config with cross references""" f = make_temp_file(""" # A comment [Main] default: other 
[local] map: echo example[1-100] [other] map: echo "foo: @local:foo" | sed -n 's/^$GROUP:\(.*\)/\\1/p' [third] map: echo -e "bar: @ref-rel\\nref-rel: @other:foo\\nref-all: @*" | sed -n 's/^$GROUP:\(.*\)/\\1/p' list: echo bar """) res = GroupResolverConfig(f.name) nodeset = NodeSet("@other:foo", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # @third:bar -> @ref-rel (third) -> @other:foo -> @local:foo -> nodes nodeset = NodeSet("@third:bar", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") nodeset = NodeSet("@third:ref-all", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") def testConfigGroupsDirDummy(self): """test groups with groupsdir defined (dummy)""" f = make_temp_file(""" [Main] default: local groupsdir: /path/to/nowhere [local] map: echo example[1-100] #all: list: echo foo #reverse: """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]") def testConfigGroupsDirExists(self): """test groups with groupsdir defined (real, other)""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: new_local groupsdir: %s [local] map: echo example[1-100] #all: list: echo foo #reverse: """ % dname) f2 = make_temp_file(""" [new_local] map: echo example[1-100] #all: list: echo bar #reverse: """, suffix=".conf", dir=dname) try: res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@bar") self.assertEqual(str(NodeSet("@bar", resolver=res)), "example[1-100]") finally: f2.close() f.close() shutil.rmtree(dname, ignore_errors=True) def testConfigGroupsMultipleDirs(self): """test groups with multiple confdir defined""" dname1 = make_temp_dir() dname2 = make_temp_dir() # Notes: # - use dname1 two times to check dup checking 
code # - use quotes on one of the directory path f = make_temp_file(""" [Main] default: local2 confdir: "%s" %s %s [local] map: echo example[1-100] list: echo foo """ % (dname1, dname2, dname1)) fs1 = make_temp_file(""" [local1] map: echo loc1node[1-100] list: echo bar """, suffix=".conf", dir=dname1) fs2 = make_temp_file(""" [local2] map: echo loc2node[02-50] list: echo toto """, suffix=".conf", dir=dname2) try: res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # local self.assertEqual(nodeset.regroup("local"), "@local:foo") self.assertEqual(str(NodeSet("@local:foo", resolver=res)), "example[1-100]") # local1 nodeset = NodeSet("loc1node[1-100]", resolver=res) self.assertEqual(nodeset.regroup("local1"), "@local1:bar") self.assertEqual(str(NodeSet("@local1:bar", resolver=res)), "loc1node[1-100]") # local2 nodeset = NodeSet("loc2node[02-50]", resolver=res) self.assertEqual(nodeset.regroup(), "@toto") # default group source self.assertEqual(str(NodeSet("@toto", resolver=res)), "loc2node[02-50]") finally: fs2.close() fs1.close() f.close() shutil.rmtree(dname2, ignore_errors=True) shutil.rmtree(dname1, ignore_errors=True) def testConfigGroupsDirDupConfig(self): """test groups with duplicate in groupsdir""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: iamdup groupsdir: %s [local] map: echo example[1-100] #all: list: echo foo #reverse: """ % dname) f2 = make_temp_file(""" [iamdup] map: echo example[1-100] #all: list: echo bar #reverse: """, suffix=".conf", dir=dname) f3 = make_temp_file(""" [iamdup] map: echo example[10-200] #all: list: echo patato #reverse: """, suffix=".conf", dir=dname) try: self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) finally: f3.close() f2.close() f.close() shutil.rmtree(dname, ignore_errors=True) def testConfigGroupsDirExistsNoOther(self): """test groups with groupsdir defined (real, no other)""" dname1 = make_temp_dir() 
dname2 = make_temp_dir() f = make_temp_file(""" [Main] default: new_local groupsdir: %s %s """ % (dname1, dname2)) f2 = make_temp_file(""" [new_local] map: echo example[1-100] #all: list: echo bar #reverse: """, suffix=".conf", dir=dname2) try: res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@bar") self.assertEqual(str(NodeSet("@bar", resolver=res)), "example[1-100]") finally: f2.close() f.close() shutil.rmtree(dname1, ignore_errors=True) shutil.rmtree(dname2, ignore_errors=True) def testConfigGroupsDirNotADirectory(self): """test groups with groupsdir defined (not a directory)""" dname = make_temp_dir() fdummy = make_temp_file("wrong") f = make_temp_file(""" [Main] default: new_local groupsdir: %s """ % fdummy.name) try: self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) finally: fdummy.close() f.close() shutil.rmtree(dname, ignore_errors=True) def testConfigIllegalChars(self): """test groups with illegal characters""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo 'foo *' reverse: echo f^oo """) res = GroupResolverConfig(f.name, illegal_chars=set("@,&!&^*")) nodeset = NodeSet("example[1-100]", resolver=res) self.assertRaises(GroupResolverIllegalCharError, nodeset.groups) self.assertRaises(GroupResolverIllegalCharError, nodeset.regroup) def testConfigMaxRecursionError(self): """test groups maximum recursion depth exceeded error""" f = make_temp_file(""" # A comment [Main] default: local [local] map: echo @deep list: echo deep """) res = GroupResolverConfig(f.name) self.assertRaises(NodeSetParseError, NodeSet, "@deep", resolver=res) def testGroupResolverND(self): """test NodeSet with simple custom GroupResolver (nD)""" test_groups4 = makeTestG4() source = UpcallGroupSource("simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups4.name, "sed -n 
's/^all:\(.*\)/\\1/p' %s" % test_groups4.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups4.name, None) # create custom resolver with default source res = GroupResolver(source) self.assertFalse(res.has_node_groups()) self.assertFalse(res.has_node_groups("dummy_namespace")) nodeset = NodeSet("@rack-x1y2", resolver=res) self.assertEqual(nodeset, NodeSet("idaho[2-3]z1")) self.assertEqual(str(nodeset), "idaho[2-3]z1") nodeset = NodeSet("@rack-y1", resolver=res) self.assertEqual(str(nodeset), "idaho[1-2,4-5]z1") nodeset = NodeSet("@rack-all", resolver=res) self.assertEqual(str(nodeset), "idaho[1-7]z1") # test NESTED nD groups() self.assertEqual(sorted(nodeset.groups().keys()), ['@rack-all', '@rack-x1', '@rack-x1y1', '@rack-x1y2', '@rack-x2', '@rack-x2y1', '@rack-x2y2', '@rack-y1', '@rack-y2']) self.assertEqual(sorted(nodeset.groups(groupsource="simple").keys()), ['@simple:rack-all', '@simple:rack-x1', '@simple:rack-x1y1', '@simple:rack-x1y2', '@simple:rack-x2', '@simple:rack-x2y1', '@simple:rack-x2y2', '@simple:rack-y1', '@simple:rack-y2']) self.assertEqual(sorted(nodeset.groups(groupsource="simple", noprefix=True).keys()), ['@rack-all', '@rack-x1', '@rack-x1y1', '@rack-x1y2', '@rack-x2', '@rack-x2y1', '@rack-x2y2', '@rack-y1', '@rack-y2']) testns = NodeSet() for gnodes, inodes in nodeset.groups().itervalues(): testns.update(inodes) self.assertEqual(testns, nodeset) # more tests with nested groups nodeset = NodeSet("idaho5z1", resolver=res) self.assertEqual(sorted(nodeset.groups().keys()), ['@rack-all', '@rack-x2', '@rack-x2y1', '@rack-y1']) nodeset = NodeSet("idaho5z1,idaho4z1", resolver=res) self.assertEqual(sorted(nodeset.groups().keys()), ['@rack-all', '@rack-x2', '@rack-x2y1', '@rack-y1']) nodeset = NodeSet("idaho5z1,idaho7z1", resolver=res) self.assertEqual(sorted(nodeset.groups().keys()), ['@rack-all', '@rack-x2', '@rack-x2y1', '@rack-x2y2', '@rack-y1', '@rack-y2']) def testConfigCFGDIR(self): """test groups with $CFGDIR use in upcalls""" f = 
make_temp_file(""" [Main] default: local [local] map: echo example[1-100] list: basename $CFGDIR """) res = GroupResolverConfig(f.name) nodeset = NodeSet("example[1-100]", resolver=res) # just a trick to check $CFGDIR resolution... tmpgroup = os.path.basename(os.path.dirname(f.name)) self.assertEqual(nodeset.groups().keys(), ['@%s' % tmpgroup]) self.assertEqual(str(nodeset), "example[1-100]") self.assertEqual(nodeset.regroup(), "@%s" % tmpgroup) self.assertEqual(str(NodeSet("@%s" % tmpgroup, resolver=res)), "example[1-100]") def test_fromall_grouplist(self): """test NodeSet.fromall() without all upcall""" # Group Source that has no all upcall and that can handle special char test_groups2 = makeTestG2() source = UpcallGroupSource("simple", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups2.name, None, "sed -n 's/^\([0-9A-Za-z_-\%%]*\):.*/\\1/p' %s" % test_groups2.name, None) res = GroupResolver(source) # fromall will trigger ParserEngine.grouplist() that we want to test here nsall = NodeSet.fromall(resolver=res) # if working, group resolution worked with % char self.assertEqual(str(NodeSet.fromall(resolver=res)), "montana[32-55,87-90]") self.assertEqual(len(nsall), 28) # btw explicitly check escaped char nsesc = NodeSet('@escape%test', resolver=res) self.assertEqual(str(nsesc), 'montana[87-90]') self.assertEqual(len(nsesc), 4) nsesc2 = NodeSet('@esc%test2', resolver=res) self.assertEqual(nsesc, nsesc2) ns = NodeSet('montana[87-90]', resolver=res) # could also result in escape%test? 
self.assertEqual(ns.regroup(), '@esc%test2') class NodeSetGroup2GSTest(unittest.TestCase): def setUp(self): """configure simple RESOLVER_STD_GROUP""" # create temporary groups file and keep a reference to avoid file closing self.test_groups1 = makeTestG1() self.test_groups2 = makeTestG2() # create 2 GroupSource objects default = UpcallGroupSource("default", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % self.test_groups1.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % self.test_groups1.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % self.test_groups1.name, None) source2 = UpcallGroupSource("source2", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % self.test_groups2.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % self.test_groups2.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % self.test_groups2.name, None) resolver = GroupResolver(default) resolver.add_source(source2) set_std_group_resolver(resolver) def tearDown(self): """restore default RESOLVER_STD_GROUP""" set_std_group_resolver(None) del self.test_groups1 del self.test_groups2 def testGroupSyntaxes(self): """test NodeSet group operation syntaxes""" nodeset = NodeSet("@gpu") self.assertEqual(str(nodeset), "montana[38-41]") nodeset = NodeSet("@chassis[1-3,5]&@chassis[2-3]") self.assertEqual(str(nodeset), "montana[34-37]") nodeset1 = NodeSet("@io!@mds") nodeset2 = NodeSet("@oss") self.assertEqual(str(nodeset1), str(nodeset2)) self.assertEqual(str(nodeset1), "montana[4-5]") def testGroupListDefault(self): """test NodeSet group listing GroupResolver.grouplist()""" groups = std_group_resolver().grouplist() self.assertEqual(len(groups), 20) helper_groups = grouplist() self.assertEqual(len(helper_groups), 20) total = 0 nodes = NodeSet() for group in groups: ns = NodeSet("@%s" % group) total += len(ns) nodes.update(ns) self.assertEqual(total, 310) all_nodes = NodeSet.fromall() self.assertEqual(len(all_nodes), len(nodes)) self.assertEqual(all_nodes, nodes) def testGroupListSource2(self): """test NodeSet group listing 
GroupResolver.grouplist(source)""" groups = std_group_resolver().grouplist("source2") self.assertEqual(len(groups), 2) total = 0 for group in groups: total += len(NodeSet("@source2:%s" % group)) self.assertEqual(total, 24) def testGroupNoPrefix(self): """test NodeSet group noprefix option""" nodeset = NodeSet("montana[32-37,42-55]") self.assertEqual(nodeset.regroup("source2"), "@source2:para") self.assertEqual(nodeset.regroup("source2", noprefix=True), "@para") def testGroupGroups(self): """test NodeSet.groups()""" nodeset = NodeSet("montana[32-37,42-55]") self.assertEqual(sorted(nodeset.groups().keys()), ['@all', '@chassis1', '@chassis10', '@chassis11', '@chassis12', '@chassis2', '@chassis3', '@chassis6', '@chassis7', '@chassis8', '@chassis9', '@compute']) testns = NodeSet() for gnodes, inodes in nodeset.groups().itervalues(): testns.update(inodes) self.assertEqual(testns, nodeset) class NodeSetRegroupTest(unittest.TestCase): def setUp(self): """setUp test reproducibility: change standard group resolver to ensure that no local group source is used during tests""" set_std_group_resolver(GroupResolver()) # dummy resolver def tearDown(self): """tearDown: restore standard group resolver""" set_std_group_resolver(None) # restore std resolver def testGroupResolverReverse(self): """test NodeSet GroupResolver with reverse upcall""" test_groups3 = makeTestG3() test_reverse3 = makeTestR3() source = UpcallGroupSource("test", "sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups3.name, "sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups3.name, "sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups3.name, "awk -F: '/^$NODE:/ { gsub(\",\",\"\\n\",$2); print $2 }' %s" % test_reverse3.name) # create custom resolver with default source res = GroupResolver(source) nodeset = NodeSet("@all", resolver=res) self.assertEqual(nodeset, NodeSet("montana[32-55]")) self.assertEqual(str(nodeset), "montana[32-55]") self.assertEqual(nodeset.regroup(), "@all") self.assertEqual(nodeset.regroup(), 
"@all") nodeset = NodeSet("@overclock", resolver=res) self.assertEqual(nodeset, NodeSet("montana[41-42]")) self.assertEqual(str(nodeset), "montana[41-42]") self.assertEqual(nodeset.regroup(), "@overclock") self.assertEqual(nodeset.regroup(), "@overclock") nodeset = NodeSet("@gpu,@overclock", resolver=res) self.assertEqual(str(nodeset), "montana[38-42]") self.assertEqual(nodeset, NodeSet("montana[38-42]")) # un-overlap :) self.assertEqual(nodeset.regroup(), "@gpu,montana42") self.assertEqual(nodeset.regroup(), "@gpu,montana42") self.assertEqual(nodeset.regroup(overlap=True), "@gpu,@overclock") nodeset = NodeSet("montana41", resolver=res) self.assertEqual(nodeset.regroup(), "montana41") self.assertEqual(nodeset.regroup(), "montana41") # test regroup code when using unindexed node nodeset = NodeSet("idaho", resolver=res) self.assertEqual(nodeset.regroup(), "@single") self.assertEqual(nodeset.regroup(), "@single") nodeset = NodeSet("@single", resolver=res) self.assertEqual(str(nodeset), "idaho") # unresolved unindexed: nodeset = NodeSet("utah", resolver=res) self.assertEqual(nodeset.regroup(), "utah") self.assertEqual(nodeset.regroup(), "utah") nodeset = NodeSet("@all!montana38", resolver=res) self.assertEqual(nodeset, NodeSet("montana[32-37,39-55]")) self.assertEqual(str(nodeset), "montana[32-37,39-55]") self.assertEqual(nodeset.regroup(), "@para,montana[39-41]") self.assertEqual(nodeset.regroup(), "@para,montana[39-41]") self.assertEqual(nodeset.regroup(overlap=True), "@chassis[1-3],@login,@overclock,@para,montana[39-40]") self.assertEqual(nodeset.regroup(overlap=True), "@chassis[1-3],@login,@overclock,@para,montana[39-40]") nodeset = NodeSet("montana[32-37]", resolver=res) self.assertEqual(nodeset.regroup(), "@chassis[1-3]") self.assertEqual(nodeset.regroup(), "@chassis[1-3]") class StaticGroupSource(UpcallGroupSource): """ A memory only group source based on a provided dict. 
""" def __init__(self, name, data): all_upcall = None if 'all' in data: all_upcall = 'fake_all' list_upcall = None if 'list' in data: list_upcall = 'fake_list' UpcallGroupSource.__init__(self, name, "fake_map", all_upcall, list_upcall) self._data = data def _upcall_read(self, cmdtpl, args=dict()): if cmdtpl == 'map': return self._data[cmdtpl].get(args['GROUP']) elif cmdtpl == 'reverse': return self._data[cmdtpl].get(args['NODE']) else: return self._data[cmdtpl] class GroupSourceCacheTest(unittest.TestCase): def test_clear_cache(self): """test GroupSource.clear_cache()""" source = StaticGroupSource('cache', {'map': {'a': 'foo1', 'b': 'foo2'} }) # create custom resolver with default source res = GroupResolver(source) # Populate map cache self.assertEqual("foo1", str(NodeSet("@a", resolver=res))) self.assertEqual("foo2", str(NodeSet("@b", resolver=res))) self.assertEqual(len(source._cache['map']), 2) # Clear cache source.clear_cache() self.assertEqual(len(source._cache['map']), 0) def test_expired_cache(self): """test UpcallGroupSource cache entries expired according to config""" # create custom resolver with default source source = StaticGroupSource('cache', {'map': {'a': 'foo1', 'b': 'foo2'} }) source.cache_time = 0.2 res = GroupResolver(source) # Populate map cache self.assertEqual("foo1", str(NodeSet("@a", resolver=res))) self.assertEqual("foo2", str(NodeSet("@b", resolver=res))) self.assertEqual(len(source._cache['map']), 2) # Be sure 0.2 cache time is expired (especially for old Python version) time.sleep(0.25) source._data['map']['a'] = 'something_else' self.assertEqual('something_else', str(NodeSet("@a", resolver=res))) def test_config_cache_time(self): """test group config cache_time options""" f = make_temp_file(""" [local] cache_time: 0.2 map: echo foo1 """) res = GroupResolverConfig(f.name) self.assertEqual(res._sources['local'].cache_time, 0.2) self.assertEqual("foo1", str(NodeSet("@local:foo", resolver=res))) class GroupSourceTest(unittest.TestCase): 
"""Test class for 1.7 dict-based GroupSource""" def test_base_class0(self): """test base GroupSource class (empty)""" gs = GroupSource("emptysrc") self.assertEqual(gs.resolv_map('gr1'), '') self.assertEqual(gs.resolv_map('gr2'), '') self.assertEqual(gs.resolv_list(), []) self.assertRaises(GroupSourceNoUpcall, gs.resolv_all) self.assertRaises(GroupSourceNoUpcall, gs.resolv_reverse, 'n4') def test_base_class1(self): """test base GroupSource class (map and list)""" gs = GroupSource("testsrc", { 'gr1': ['n1', 'n4', 'n3', 'n2'], 'gr2': ['n9', 'n4'] }) self.assertEqual(gs.resolv_map('gr1'), ['n1', 'n4', 'n3', 'n2']) self.assertEqual(gs.resolv_map('gr2'), ['n9', 'n4']) self.assertEqual(sorted(gs.resolv_list()), ['gr1', 'gr2']) self.assertRaises(GroupSourceNoUpcall, gs.resolv_all) self.assertRaises(GroupSourceNoUpcall, gs.resolv_reverse, 'n4') def test_base_class2(self): """test base GroupSource class (all)""" gs = GroupSource("testsrc", { 'gr1': ['n1', 'n4', 'n3', 'n2'], 'gr2': ['n9', 'n4'] }, 'n[1-9]') self.assertEqual(gs.resolv_all(), 'n[1-9]') class YAMLGroupLoaderTest(unittest.TestCase): def test_missing_pyyaml(self): """test YAMLGroupLoader with missing PyYAML""" sys_path_saved = sys.path try: sys.path = [] # make import yaml failed if 'yaml' in sys.modules: # forget about previous yaml import del sys.modules['yaml'] f = make_temp_file(""" vendors: apricot: node""") self.assertRaises(GroupResolverConfigError, YAMLGroupLoader, f.name) finally: sys.path = sys_path_saved def test_one_source(self): """test YAMLGroupLoader one source""" f = make_temp_file(""" vendors: apricot: node""") loader = YAMLGroupLoader(f.name) sources = list(loader) self.assertEqual(len(sources), 1) self.assertEqual(loader.groups("vendors"), { 'apricot': 'node' }) def test_multi_sources(self): """test YAMLGroupLoader multi sources""" f = make_temp_file(""" vendors: apricot: node customers: cherry: client-4-2""") loader = YAMLGroupLoader(f.name) sources = list(loader) self.assertEqual(len(sources), 
2) self.assertEqual(loader.groups("vendors"), { 'apricot': 'node' }) self.assertEqual(loader.groups("customers"), { 'cherry': 'client-4-2' }) def test_reload(self): """test YAMLGroupLoader cache_time""" f = make_temp_file(""" vendors: apricot: "node[1-10]" avocado: 'node[11-20]' banana: node[21-30] customers: cherry: client-4-2""") loader = YAMLGroupLoader(f.name, cache_time=1) self.assertEqual(loader.groups("vendors"), { 'apricot': 'node[1-10]', 'avocado': 'node[11-20]', 'banana': 'node[21-30]' }) # modify YAML file and check that it is reloaded after cache_time f.write("\n nut: node42\n") # oh and BTW for ultimate code coverage, test if we add a new source # on-the-fly, this is not supported but should be ignored f.write("thieves:\n pomegranate: node100\n") f.flush() time.sleep(0.1) # too soon self.assertEqual(loader.groups("customers"), { 'cherry': 'client-4-2' }) time.sleep(1.0) self.assertEqual(loader.groups("vendors"), { 'apricot': 'node[1-10]', 'avocado': 'node[11-20]', 'banana': 'node[21-30]' }) self.assertEqual(loader.groups("customers"), { 'cherry': 'client-4-2', 'nut': 'node42' }) def test_iter(self): """test YAMLGroupLoader iterator""" f = make_temp_file(""" src1: src1grp1: node11 src1grp2: node12 src2: src2grp1: node21 src2grp2: node22 src3: src3grp1: node31 src3grp2: node32""") loader = YAMLGroupLoader(f.name, cache_time = 0.1) # iterate sources with cache expired for source in loader: time.sleep(0.5) # force reload self.assertEqual(len(source.groups), 2) class GroupResolverYAMLTest(unittest.TestCase): def setUp(self): """setUp test reproducibility: change standard group resolver to ensure that no local group source is used during tests""" set_std_group_resolver(GroupResolver()) # dummy resolver def tearDown(self): """tearDown: restore standard group resolver""" set_std_group_resolver(None) # restore std resolver def test_yaml_basic(self): """test groups with a basic YAML config file""" dname = make_temp_dir() f = make_temp_file(""" # A comment [Main] 
default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" yaml: foo: example[1-4,91-100],example90 bar: example[5-89] """, suffix=".yaml", dir=dname) res = GroupResolverConfig(f.name) # Group resolution nodeset = NodeSet("@foo", resolver=res) self.assertEqual(str(nodeset), "example[1-4,90-100]") nodeset = NodeSet("@bar", resolver=res) self.assertEqual(str(nodeset), "example[5-89]") nodeset = NodeSet("@foo,@bar", resolver=res) self.assertEqual(str(nodeset), "example[1-100]") nodeset = NodeSet("@unknown", resolver=res) self.assertEqual(len(nodeset), 0) # Regroup nodeset = NodeSet("example[1-4,90-100]", resolver=res) self.assertEqual(str(nodeset), "example[1-4,90-100]") self.assertEqual(nodeset.regroup(), "@foo") self.assertEqual(nodeset.groups().keys(), ["@foo"]) self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-4,90-100]") # No 'all' defined: all_nodes() should raise an error self.assertRaises(GroupSourceError, res.all_nodes) # but then NodeSet falls back to the union of all groups nodeset = NodeSet.fromall(resolver=res) self.assertEqual(str(nodeset), "example[1-100]") # regroup doesn't use @all in that case self.assertEqual(nodeset.regroup(), "@bar,@foo") # No 'reverse' defined: node_groups() should raise an error self.assertRaises(GroupSourceError, res.node_groups, "example1") # regroup with rest nodeset = NodeSet("example[1-101]", resolver=res) self.assertEqual(nodeset.regroup(), "@bar,@foo,example101") # regroup incomplete nodeset = NodeSet("example[50-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[50-200]") # regroup no matching nodeset = NodeSet("example[102-200]", resolver=res) self.assertEqual(nodeset.regroup(), "example[102-200]") def test_yaml_fromall(self): """test groups special all group""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" yaml: foo: example[1-4,91-100],example90 bar: example[5-89] all: example[90-100] """, 
suffix=".yaml", dir=dname) res = GroupResolverConfig(f.name) nodeset = NodeSet.fromall(resolver=res) self.assertEqual(str(nodeset), "example[90-100]") # regroup uses @all if it is defined self.assertEqual(nodeset.regroup(), "@all") def test_yaml_invalid_groups_not_dict(self): """test groups with an invalid YAML config file (1)""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" yaml: bar """, suffix=".yaml", dir=dname) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def test_yaml_invalid_root_dict(self): """test groups with an invalid YAML config file (2)""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" - Casablanca - North by Northwest - The Man Who Wasn't There """, suffix=".yaml", dir=dname) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def test_yaml_invalid_not_yaml(self): """test groups with an invalid YAML config file (3)""" dname = make_temp_dir() f = make_temp_file(""" [Main] default: yaml autodir: %s """ % dname) yamlfile = make_temp_file(""" [Dummy] one: un two: deux three: trois """, suffix=".yaml", dir=dname) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def test_wrong_autodir(self): """test wrong autodir (doesn't exist)""" f = make_temp_file(""" [Main] autodir: /i/do/not/=exist= default: local """) # absent autodir itself doesn't raise any exception, but default # pointing to nothing does... self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name) def test_wrong_autodir_is_file(self): """test wrong autodir (is a file)""" fe = make_temp_file("") f = make_temp_file(""" [Main] autodir: %s default: local [local] map: node """ % fe.name) self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
en
0.542338
#!/usr/bin/env python # ClusterShell.Node* test suite Unit test for NodeSet with Group support # Wildcard import for testing purpose Create a temporary group file 1 # oss: montana5,montana4 mds: montana6 io: montana[4-6] #42: montana3 compute: montana[32-163] chassis1: montana[32-33] chassis2: montana[34-35] chassis3: montana[36-37] chassis4: montana[38-39] chassis5: montana[40-41] chassis6: montana[42-43] chassis7: montana[44-45] chassis8: montana[46-47] chassis9: montana[48-49] chassis10: montana[50-51] chassis11: montana[52-53] chassis12: montana[54-55] Uppercase: montana[1-2] gpuchassis: @chassis[4-5] gpu: montana[38-41] all: montana[1-6,32-163] # /!\ Need to return file object and not f1.name, otherwise the temporary # file might be immediately unlinked. Create a temporary group file 2 # # para: montana[32-37,42-55] gpu: montana[38-41] escape%test: montana[87-90] esc%test2: @escape%test Create a temporary group file 3 # # all: montana[32-55] para: montana[32-37,42-55] gpu: montana[38-41] login: montana[32-33] overclock: montana[41-42] chassis1: montana[32-33] chassis2: montana[34-35] chassis3: montana[36-37] single: idaho Create a temporary reverse group file 3 # # montana32: all,para,login,chassis1 montana33: all,para,login,chassis1 montana34: all,para,chassis2 montana35: all,para,chassis2 montana36: all,para,chassis3 montana37: all,para,chassis3 montana38: all,gpu montana39: all,gpu montana40: all,gpu montana41: all,gpu,overclock montana42: all,para,overclock montana43: all,para montana44: all,para montana45: all,para montana46: all,para montana47: all,para montana48: all,para montana49: all,para montana50: all,para montana51: all,para montana52: all,para montana53: all,para montana54: all,para montana55: all,para idaho: single Create a temporary group file 4 (nD) # rack-x1y1: idaho1z1,idaho2z1 rack-x1y2: idaho2z1,idaho3z1 rack-x2y1: idaho4z1,idaho5z1 rack-x2y2: idaho6z1,idaho7z1 rack-x1: @rack-x1y[1-2] rack-x2: @rack-x2y[1-2] rack-y1: @rack-x[1-2]y1 
rack-y2: @rack-x[1-2]y2 rack-all: @rack-x[1-2]y[1-2] setUp test reproducibility: change standard group resolver to ensure that no local group source is used during tests # dummy resolver tearDown: restore standard group resolver # restore std resolver test NodeSet with simple custom GroupResolver # create custom resolver with default source # test recursive group gpuchassis # test also with some inline operations test NodeSet.fromall() with no resolver test NodeSet.groups() with no resolver test GroupResolver.add_source() error # adding the same source again should raise ValueError test NodeSet with minimal GroupResolver # create custom resolver with default source test groups with an empty configuration file # NodeSet should work # without group support test groups resolver with an empty file list # empty file list OR as if no config file is parsable # NodeSet should work # without group support test groups with a basic local config file # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: # No 'all' defined: all_nodes() should raise an error # No 'reverse' defined: node_groups() should raise an error # regroup with rest # regroup incomplete # regroup no matching test wrong groups config syntax # A comment [Main] default: local [local] something: echo example[1-100] test groups with a basic local config file (verbose) # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: test groups with a basic local config file (= alternative) # A comment [Main] default=local [local] map=echo example[1-100] #all= list=echo foo #reverse= # @truc? 
test groups with a empty default namespace # A comment [Main] default: [local] map: echo example[1-100] #all: list: echo foo #reverse: test groups with a local config without main section # A comment [local] map: echo example[1-100] #all: list: echo foo #reverse: test groups with a wrong default namespace # A comment [Main] default: pointless [local] map: echo example[1-100] #all: list: echo foo #reverse: test groups with config and failed query # A comment [Main] default: local [local] map: false all: false list: echo foo #reverse: # all_nodes() test groups with config and failed query (reverse) # A comment [Main] default: local [local] map: echo example1 list: echo foo reverse: false test groups by calling regroup(wrong_namespace) # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo #reverse: test groups with no list and not reverse upcall # A comment [Main] default: local [local] map: echo example[1-100] #all: #list: #reverse: # not able to regroup, should still return valid nodeset test groups with no list but reverse upcall # A comment [Main] default: local [local] map: echo example[1-100] #all: #list: echo foo reverse: echo foo test groups with no map upcall # A comment [Main] default: local [local] #map: echo example[1-100] all: list: echo foo #reverse: echo foo # map is a mandatory upcall, an exception should be raised early test groups with list upcall returning nothing # A comment [Main] default: local [local] map: echo example[1-100] #all: list: : reverse: echo foo test all groups listing with all upcall # A comment [Main] default: local [local] map: echo example[1-100] all: echo foo bar list: echo foo #reverse: # test "@*" magic group listing # with group source test all groups listing without all upcall # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo foo bar #reverse: # test "@*" magic group listing # with group source test all groups listing without all upcall (nD) # Even in nD, 
ensure that $GROUP is a simple group that has been previously expanded # A comment [Main] default: local [local] map: if [[ $GROUP == "x1y[3-4]" ]]; then exit 1; elif [[ $GROUP == "x1y1" ]]; then echo rack[1-5]z[1-42]; else echo rack[6-10]z[1-42]; fi #all: list: echo x1y1 x1y2 x1y[3-4] #reverse: # raw # cleaned # test "@*" magic group listing # with group source test group list containing illegal characters # A comment [Main] default: local [local] map: echo rack[6-10]z[1-42] #all: list: echo x1y1 x1y2 @illegal x1y[3-4] #reverse: test sources() with groups config of 2 sources # A comment [Main] default: local [local] map: echo example[1-100] [other] map: echo example[1-10] test groups config with cross references # A comment [Main] default: other [local] map: echo example[1-100] [other] map: echo "foo: @local:foo" | sed -n 's/^$GROUP:\(.*\)/\\1/p' [third] map: echo -e "bar: @ref-rel\\nref-rel: @other:foo\\nref-all: @*" | sed -n 's/^$GROUP:\(.*\)/\\1/p' list: echo bar # @third:bar -> @ref-rel (third) -> @other:foo -> @local:foo -> nodes test groups with groupsdir defined (dummy) [Main] default: local groupsdir: /path/to/nowhere [local] map: echo example[1-100] #all: list: echo foo #reverse: test groups with groupsdir defined (real, other) [Main] default: new_local groupsdir: %s [local] map: echo example[1-100] #all: list: echo foo #reverse: [new_local] map: echo example[1-100] #all: list: echo bar #reverse: test groups with multiple confdir defined # Notes: # - use dname1 two times to check dup checking code # - use quotes on one of the directory path [Main] default: local2 confdir: "%s" %s %s [local] map: echo example[1-100] list: echo foo [local1] map: echo loc1node[1-100] list: echo bar [local2] map: echo loc2node[02-50] list: echo toto # local # local1 # local2 # default group source test groups with duplicate in groupsdir [Main] default: iamdup groupsdir: %s [local] map: echo example[1-100] #all: list: echo foo #reverse: [iamdup] map: echo example[1-100] #all: 
list: echo bar #reverse: [iamdup] map: echo example[10-200] #all: list: echo patato #reverse: test groups with groupsdir defined (real, no other) [Main] default: new_local groupsdir: %s %s [new_local] map: echo example[1-100] #all: list: echo bar #reverse: test groups with groupsdir defined (not a directory) [Main] default: new_local groupsdir: %s test groups with illegal characters # A comment [Main] default: local [local] map: echo example[1-100] #all: list: echo 'foo *' reverse: echo f^oo test groups maximum recursion depth exceeded error # A comment [Main] default: local [local] map: echo @deep list: echo deep test NodeSet with simple custom GroupResolver (nD) # create custom resolver with default source # test NESTED nD groups() # more tests with nested groups test groups with $CFGDIR use in upcalls [Main] default: local [local] map: echo example[1-100] list: basename $CFGDIR # just a trick to check $CFGDIR resolution... test NodeSet.fromall() without all upcall # Group Source that has no all upcall and that can handle special char # fromall will trigger ParserEngine.grouplist() that we want to test here # if working, group resolution worked with % char # btw explicitly check escaped char # could also result in escape%test? 
configure simple RESOLVER_STD_GROUP # create temporary groups file and keep a reference to avoid file closing # create 2 GroupSource objects restore default RESOLVER_STD_GROUP test NodeSet group operation syntaxes test NodeSet group listing GroupResolver.grouplist() test NodeSet group listing GroupResolver.grouplist(source) test NodeSet group noprefix option test NodeSet.groups() setUp test reproducibility: change standard group resolver to ensure that no local group source is used during tests # dummy resolver tearDown: restore standard group resolver # restore std resolver test NodeSet GroupResolver with reverse upcall # create custom resolver with default source # un-overlap :) # test regroup code when using unindexed node # unresolved unindexed: A memory only group source based on a provided dict. test GroupSource.clear_cache() # create custom resolver with default source # Populate map cache # Clear cache test UpcallGroupSource cache entries expired according to config # create custom resolver with default source # Populate map cache # Be sure 0.2 cache time is expired (especially for old Python version) test group config cache_time options [local] cache_time: 0.2 map: echo foo1 Test class for 1.7 dict-based GroupSource test base GroupSource class (empty) test base GroupSource class (map and list) test base GroupSource class (all) test YAMLGroupLoader with missing PyYAML # make import yaml failed # forget about previous yaml import vendors: apricot: node test YAMLGroupLoader one source vendors: apricot: node test YAMLGroupLoader multi sources vendors: apricot: node customers: cherry: client-4-2 test YAMLGroupLoader cache_time vendors: apricot: "node[1-10]" avocado: 'node[11-20]' banana: node[21-30] customers: cherry: client-4-2 # modify YAML file and check that it is reloaded after cache_time # oh and BTW for ultimate code coverage, test if we add a new source # on-the-fly, this is not supported but should be ignored # too soon test YAMLGroupLoader iterator 
src1: src1grp1: node11 src1grp2: node12 src2: src2grp1: node21 src2grp2: node22 src3: src3grp1: node31 src3grp2: node32 # iterate sources with cache expired # force reload setUp test reproducibility: change standard group resolver to ensure that no local group source is used during tests # dummy resolver tearDown: restore standard group resolver # restore std resolver test groups with a basic YAML config file # A comment [Main] default: yaml autodir: %s yaml: foo: example[1-4,91-100],example90 bar: example[5-89] # Group resolution # Regroup # No 'all' defined: all_nodes() should raise an error # but then NodeSet falls back to the union of all groups # regroup doesn't use @all in that case # No 'reverse' defined: node_groups() should raise an error # regroup with rest # regroup incomplete # regroup no matching test groups special all group [Main] default: yaml autodir: %s yaml: foo: example[1-4,91-100],example90 bar: example[5-89] all: example[90-100] # regroup uses @all if it is defined test groups with an invalid YAML config file (1) [Main] default: yaml autodir: %s yaml: bar test groups with an invalid YAML config file (2) [Main] default: yaml autodir: %s - Casablanca - North by Northwest - The Man Who Wasn't There test groups with an invalid YAML config file (3) [Main] default: yaml autodir: %s [Dummy] one: un two: deux three: trois test wrong autodir (doesn't exist) [Main] autodir: /i/do/not/=exist= default: local # absent autodir itself doesn't raise any exception, but default # pointing to nothing does... test wrong autodir (is a file) [Main] autodir: %s default: local [local] map: node
2.349771
2
Lib/site-packages/plotly/validators/sankey/link/concentrationscales/__init__.py
tytanya/my-first-blog
4
6629850
<gh_stars>1-10 from ._templateitemname import TemplateitemnameValidator from ._name import NameValidator from ._label import LabelValidator from ._colorscale import ColorscaleValidator from ._cmin import CminValidator from ._cmax import CmaxValidator
from ._templateitemname import TemplateitemnameValidator from ._name import NameValidator from ._label import LabelValidator from ._colorscale import ColorscaleValidator from ._cmin import CminValidator from ._cmax import CmaxValidator
none
1
1.044776
1