text
string
size
int64
token_count
int64
from django.contrib.auth import models as auth_models
from django.db import models
from django.urls import reverse


class User(auth_models.AbstractUser):
    """Project user model: Django's stock user plus store-specific fields."""

    # Employee's date of birth.
    birth_date = models.DateField(help_text="Employee Date of birth.")
    # Employee's permanent address, free-form text.
    address = models.TextField(help_text="Employee permanent address")
    # Set by the store owner to disable an account without deleting it.
    locked = models.BooleanField(
        default=False,
        help_text="Whether the account has been locked by Store owner.",
    )
498
127
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from pytorch_model import *
from pytorch_clean import *
from sklearn.metrics import classification_report
from torch.optim import optimizer
from transformers import AutoModel, AutoTokenizer
from vncorenlp import VnCoreNLP
from vncorenlp.vncorenlp import VnCoreNLP
from transformers import AdamW

# Single source of truth for the checkpoint location.
# BUG FIX: the script saved weights to "pytorch_trainmodel/save_weights.pt"
# but later loaded "save_weights.pt", so evaluation used stale/missing weights.
CHECKPOINT_PATH = "pytorch_trainmodel/save_weights.pt"

# calling pretrained model
phobert = AutoModel.from_pretrained('vinai/phobert-base')
tokenizer = AutoTokenizer.from_pretrained('vinai/phobert-base')
rdrsegmenter = VnCoreNLP("vncorenlp/VnCoreNLP-1.1.1.jar", annotators="wseg",
                         max_heap_size='-Xmx500m')

# get data
sentences, labels = get_data(['sacarism_dataset.json', 'normal_dataset.json'])
sentences_segment(sentences, rdrsegmenter)
padded, labels = shuffle_and_tokenize(sentences, labels,
                                      check_maxlen(sentences), tokenizer)
X_train, X_val, X_test, y_train, y_val, y_test = split_data(padded, labels)
train_dataloader, val_dataloader = Data_Loader(X_train, X_val, y_train, y_val)

# freeze all the parameters of the pretrained encoder; only the classifier
# head added by classify() is trained
for param in phobert.parameters():
    param.requires_grad = False

# loss (NLLLoss: the model is expected to emit log-probabilities)
cross_entropy = nn.NLLLoss()
model = classify(phobert, 2)
# NOTE: rebinds the name imported from torch.optim above; kept for
# backward compatibility with the original script.
optimizer = AdamW(model.parameters(), lr=1e-5)


def train():
    """Run one training epoch; return (average loss, stacked predictions)."""
    model.train()
    total_loss = 0
    total_preds = []
    for step, batch in enumerate(train_dataloader):
        if step % 50 == 0 and step != 0:
            print("BATCH {} of {}".format(step, len(train_dataloader)))
        inputs, batch_labels = batch  # renamed from `input`: don't shadow builtin
        model.zero_grad()
        preds = model(inputs)
        loss = cross_entropy(preds, batch_labels)
        total_loss = total_loss + loss.item()
        loss.backward()
        # clip gradients to stabilise fine-tuning
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        total_preds.append(preds.detach().numpy())
    avg_loss = total_loss / len(train_dataloader)
    total_preds = np.concatenate(total_preds, axis=0)
    return avg_loss, total_preds


def evaluate():
    """Evaluate on the validation set; return (average loss, stacked predictions)."""
    model.eval()
    total_loss = 0
    total_preds = []
    for step, batch in enumerate(val_dataloader):
        if step % 50 == 0 and step != 0:
            print("BATCH {} of {}".format(step, len(val_dataloader)))
        inputs, batch_labels = batch
        with torch.no_grad():
            preds = model(inputs)
            loss = cross_entropy(preds, batch_labels)
            total_loss += loss.item()
            total_preds.append(preds.detach().numpy())
    avg_loss = total_loss / len(val_dataloader)
    total_preds = np.concatenate(total_preds, axis=0)
    return avg_loss, total_preds


def run(epochs):
    """Train for `epochs` epochs, checkpointing the best validation loss."""
    best_valid_loss = float("inf")
    train_losses = []
    valid_losses = []
    for epoch in range(epochs):
        # 1-based display (was 0-based, printing "EPOCH 0/10")
        print("EPOCH {}/{}".format(epoch + 1, epochs))
        train_loss, _ = train()
        valid_loss, _ = evaluate()
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), CHECKPOINT_PATH)
        train_losses.append(train_loss)
        valid_losses.append(valid_loss)
        print(train_loss)
        print(valid_loss)


print("======TRAINING=======")
run(10)

print("======CHECKING=======")
# BUG FIX: load the same path the training loop saved to.
model.load_state_dict(torch.load(CHECKPOINT_PATH))
sentence = input("Your sentence you want to predict: ")


def result(sentence):
    """Segment, tokenize, left-pad and classify a single sentence.

    Returns the predicted class index (argmax over the model output).
    """
    tokens = rdrsegmenter.tokenize(sentence)
    statement = ""
    for token in tokens:
        statement += " ".join(token)
    sentence = statement
    sequence = tokenizer.encode(sentence)
    # left-pad with 0 up to the training max length so shapes match
    while len(sequence) < check_maxlen(sentences):
        sequence.insert(0, 0)
    padded = torch.tensor([sequence])
    with torch.no_grad():
        preds = model(padded)
        preds = np.argmax(preds, axis=1)
    return preds


print(result(sentence))

# check test
with torch.no_grad():
    preds = model(X_test)
    preds = preds.detach().numpy()
    preds = np.argmax(preds, axis=1)
print(classification_report(y_test, preds))
3,623
1,390
# test builtin sorted
try:
    set
    sorted
except:
    import sys
    print("SKIP")
    sys.exit()

print(sorted(set(range(100))))
print(sorted(set(range(100)), key=lambda n: n + 100 * (n % 2)))

# the key function must be passed as a keyword argument
try:
    sorted([], None)
except TypeError:
    print("TypeError")
295
109
# -*- coding: utf-8 -*-
"""Serializer tests for the Mendeley addon."""
import pytest

from addons.base.tests.serializers import CitationAddonSerializerTestSuiteMixin
from addons.base.tests.utils import MockFolder
from addons.mendeley.serializer import MendeleySerializer
from addons.mendeley.tests.factories import MendeleyAccountFactory
from tests.base import OsfTestCase

# every test in this module touches the database
pytestmark = pytest.mark.django_db


class TestMendeleySerializer(CitationAddonSerializerTestSuiteMixin, OsfTestCase):
    """Runs the shared citation-serializer test suite against Mendeley."""

    addon_short_name = 'mendeley'
    folder = MockFolder()
    Serializer = MendeleySerializer
    ExternalAccountFactory = MendeleyAccountFactory
643
198
from app import run_migration
from flask import current_app as app
from flask_testing import TestCase
from project import db


class BaseTestCase(TestCase):
    """Shared base class for the project's Flask test cases."""

    def create_app(self):
        """Configure and return the Flask app under test."""
        app.config.from_object('project.configs.TestingConfig')
        return app

    def setUp(self):
        # build a fresh schema before every test
        run_migration()

    def tearDown(self):
        # drop everything so tests stay isolated from each other
        db.session.remove()
        db.drop_all()

    def assert201(self, response):
        """Assert that `response` has HTTP status 201 (Created)."""
        self.assert_status(response, 201)
465
144
""" N layers optimistic neural network """ import torch class ThreeLayersNN(torch.nn.Module): def __init__(self, in_dim, out_dim, h_dim, activation='relu'): """ :param in_dim: (int) input dimension :param out_dim: (int) output dimension """ super(ThreeLayersNN, self).__init__() self.activation = activation # Initialize layers self.h_dim = h_dim self.linear1 = torch.nn.Linear(in_dim, h_dim[0]) self.linear2 = torch.nn.Linear(h_dim[0], h_dim[1]) self.linear3 = torch.nn.Linear(h_dim[1], out_dim) def forward(self, x): if self.activation == 'sigmoid': h = torch.sigmoid(self.linear1(x)) h = torch.sigmoid(self.linear2(h)) return self.linear3(h) elif self.activation == 'relu': h = self.linear1(x).clamp(min=0) h = self.linear2(h).clamp(min=0) return self.linear3(h)
956
335
'''
? (c) 2018 - laymonage
'''
import os
import random

import requests

from .dropson import dbx_dl, get_json


def surprise(safe=False):
    """Return a ``(surprise_url, preview_url)`` pair.

    The preview always comes from thecatapi. When ``safe`` is true the
    surprise is another cat picture; otherwise it is a random link from
    the Dropbox-hosted surprises file.
    """
    cat_api = 'http://thecatapi.com/api/images/get'

    preview = requests.get(cat_api).url.replace('http://', 'https://')

    if safe:
        chosen = requests.get(cat_api).url.replace('http://', 'https://')
    else:
        links_path = os.getenv('SURPRISES_FILE_PATH', None)
        candidates = get_json(dbx_dl(links_path))
        chosen = random.choice(candidates)

    return (chosen, preview)
630
233
# Copyright 2015-2016 Tim Burke # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Middleware to set default headers for PUT requests. End Users / Application Developers =================================== With this middleware enabled, users can set X-Default-Object-* headers on accounts and containers to automatically set default headers for subsequent object PUTs, or X-Default-Container-* headers on accounts to set defaults for subsequent container PUTs. If a default is specified at multiple levels (for example, an object default is specified both at the account and container), the more-specific level's default is used. For example, in the sequence:: POST /v1/acct X-Default-Object-X-Delete-After: 2592000 POST /v1/acct/foo X-Default-Object-X-Delete-After: 86400 PUT /v1/acct/foo/o1 PUT /v1/acct/foo/o2 X-Delete-After: 3600 PUT /v1/acct/bar/o3 PUT /v1/acct/baz/o4 POST /v1/acct/baz/o4 X-Remove-Delete-At: 1 PUT /v1/other_acct/quux/o5 * ``acct/foo/o1`` will get its ``X-Delete-After`` header from the container default, so it will be automatically be deleted after 24 hours. * ``acct/foo/o2`` had its ``X-Delete-After`` header explicitly set by the client, so it will be automatically be deleted after one hour. * ``acct/bar/o3`` will get its ``X-Delete-After`` header from the account default, so it will be deleted after 30 days. * ``acct/baz/04`` will initially be set to delete after 30 days as well. However, nothing prevents you from later changing or removing the defaulted header. 
After the subsequent ``POST``, the object will not be automatically deleted. * ``other_acct/quux/o5`` will not be automatically deleted, as neither its account nor its container specified a default expiration time. .. note:: You may not specify defaults for any X-*-Sysmeta-* or X-Backend-* headers. This is comparable to the behavior of the gatekeeper middleware. Cluster Operators ================= Requires Swift >= 1.12.0 Pipeline Placement ------------------ This middleware should be placed as far left as possible while still being right of Swift's sane-WSGI-environment middlewares. Immediately right of ``cache`` should be reasonable. Configuration Options --------------------- use_formatting If true, expose {account}, {container}, and {object} formatting variables. This can be useful for things like setting:: X-Default-Container-X-Versions-Location: .{container}_versions Default: False default-account-* default-container-* default-object-* Used to set defaults across the entire cluster. These have lower precedence than account-level defaults. Middleware Developers ===================== This middleware adds two keys to the request environment: swift.defaulter_headers This is a comma-delimited list of the headers for which this middleware has set default values. Note that other middlewares may have modified some or all of these after the defaults were set. swift.defaulter_hook This is a callback that may be used to populate defaults for subrequests. It will only modify PUT requests. It accepts a swob.Request as an argument. 
""" from swift.common.request_helpers import get_sys_meta_prefix from swift.common.swob import wsgify from swift.common.utils import config_true_value from swift.common.utils import register_swift_info from swift.proxy.controllers.base import get_account_info from swift.proxy.controllers.base import get_container_info BLACKLIST = set('x-timestamp') BLACKLIST_PREFIXES = ( get_sys_meta_prefix('account'), get_sys_meta_prefix('container'), get_sys_meta_prefix('object'), 'x-backend-', ) CALLBACK_ENV_KEY = 'swift.defaulter_hook' HEADERS_ENV_KEY = 'swift.defaulter_headers' class DefaulterMiddleware(object): def __init__(self, app, config): self.app = app self.conf = config @wsgify def __call__(self, req): req.environ[CALLBACK_ENV_KEY] = self.defaulter_hook req.environ['swift.copy_hook'] = self.copy_hook(req.environ.get( 'swift.copy_hook', lambda src_req, src_resp, sink_req: src_resp)) try: vers, acct, cont, obj = req.split_path(2, 4, True) except ValueError: # /info request, or something similar return self.app handler = getattr(self, 'do_%s' % req.method.lower(), None) if not callable(handler): handler = self.get_response_and_translate if obj is not None: req_type = 'object' elif cont is not None: req_type = 'container' elif acct is not None: req_type = 'account' return handler(req, req_type) def client_to_sysmeta(self, req, req_type): subresources = { 'account': ('container', 'object'), 'container': ('object', ), }.get(req_type, ()) header_formats = ( ('x-remove-default-%s-', True), ('x-default-%s-', False), ) for header_format, clear in header_formats: for header, value in req.headers.items(): for subresource in subresources: prefix = header_format % subresource if header.lower().startswith(prefix): header_to_default = header[len(prefix):].lower() if header_to_default.startswith(BLACKLIST_PREFIXES): continue if header_to_default in BLACKLIST: continue sysmeta_header = '%sdefault-%s-%s' % ( get_sys_meta_prefix(req_type), subresource, header_to_default) 
req.headers[sysmeta_header] = '' if clear else value def sysmeta_to_client(self, resp, req_type): prefix = get_sys_meta_prefix(req_type) + 'default-' for header, value in resp.headers.items(): if header.lower().startswith(prefix): client_header = 'x-default-%s' % header[len(prefix):] resp.headers[client_header] = value def get_response_and_translate(self, req, req_type): resp = req.get_response(self.app) self.sysmeta_to_client(resp, req_type) return resp def do_post(self, req, req_type): if req_type == 'object': return self.get_response_and_translate(req, req_type) self.client_to_sysmeta(req, req_type) return self.get_response_and_translate(req, req_type) def defaulter_hook(self, req): '''Callback so middlewares that make subrequests can populate defaults. :param req: the swob.Request that should have its headers defaulted ''' if HEADERS_ENV_KEY in req.environ: return # We've already tried setting defaults; pass if req.method != 'PUT': return # Only set defaults during PUTs try: pieces = req.split_path(2, 4, True) except ValueError: return # /info, or something? but it's a put... what? if pieces.pop(0) != 'v1': return # Swift3 request, maybe? Doesn't look like Swift API # OK, we're reasonably assured that we're working with an account, # container or object request for which we should populate defaults. format_args = {} for val, val_type in zip(pieces, ('account', 'container', 'object')): if val is not None: format_args[val_type] = val req_type = val_type defaulted = [] for header, value in self.get_defaults( req, req_type, format_args).items(): if header not in req.headers: defaulted.append(header) req.headers[header] = value req.environ[HEADERS_ENV_KEY] = ','.join(defaulted) # Go ahead and translate to sysmeta; it allows users to set things like # X-Default-Container-X-Default-Object-X-Object-Meta-Color: blue # on their account (if they really want to) and it will Just Work. 
self.client_to_sysmeta(req, req_type) def copy_hook(self, inner_hook): def outer_hook(src_req, src_resp, sink_req): src_resp = inner_hook(src_req, src_resp, sink_req) if 'swift.post_as_copy' not in src_req.environ: self.defaulter_hook(sink_req) return src_resp return outer_hook def do_put(self, req, req_type): self.defaulter_hook(req) # Once we've set the defaults, we just follow the POST flow return self.do_post(req, req_type) def get_defaults(self, req, req_type, format_args): acct_sysmeta = get_account_info(req.environ, self.app)['sysmeta'] if req_type == 'object': cont_sysmeta = get_container_info(req.environ, self.app)['sysmeta'] else: cont_sysmeta = {} defaults = {} prefix = 'default-%s-' % req_type for src in (self.conf, acct_sysmeta, cont_sysmeta): for key, value in src.items(): if not key.lower().startswith(prefix): continue header_to_default = key[len(prefix):].lower() if header_to_default.startswith(BLACKLIST_PREFIXES): continue if header_to_default in BLACKLIST: continue if self.conf['use_formatting']: try: value = value.format(**format_args) except KeyError: # This user may not have specified the default; # don't fail because of someone else pass defaults[header_to_default] = value return defaults def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) conf['use_formatting'] = config_true_value(conf.get( 'use_formatting', False)) defaulting_prefixes = tuple('default-%s-' % typ for typ in ('account', 'container', 'object')) conf_to_register = { k: v for k, v in conf.items() if k == 'use_formatting' or k.startswith(defaulting_prefixes)} register_swift_info('defaulter', **conf_to_register) def filt(app): return DefaulterMiddleware(app, conf) return filt
11,019
3,209
# -*- coding: utf-8 -*-
r"""
Information-set decoding for linear codes

Information-set decoding is a probabilistic decoding strategy that
essentially tries to guess `k` correct positions in the received word,
where `k` is the dimension of the code. A codeword agreeing with the
received word on the guessed position can easily be computed, and their
difference is one possible error vector. A "correct" guess is assumed when
this error vector has low Hamming weight.

This simple algorithm is not very efficient in itself, but there are
numerous refinements to the strategy that make it very capable over rather
large codes. Still, the decoding algorithm is exponential in dimension of
the code and the log of the field size.

The ISD strategy requires choosing how many errors is deemed acceptable.
One choice could be `d/2`, where `d` is the minimum distance of the code,
but sometimes `d` is not known, or sometimes more errors are expected. If
one chooses anything above `d/2`, the algorithm does not guarantee to
return a nearest codeword.

AUTHORS:

- David Lucas, Johan Rosenkilde, Yann Laigle-Chapuy (2016-02, 2017-06):
  initial version
"""

#******************************************************************************
#       Copyright (C) 2017 David Lucas <david.lucas@inria.fr>
#                          Johan Rosenkilde <jsrn@jsrn.dk>
#                          Yann Laigle-Chapuy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#                  http://www.gnu.org/licenses/
#******************************************************************************
from sage.all import ZZ, Integer, vector, SageObject, binomial
from .decoder import Decoder


def _format_decoding_interval(decoding_interval):
    r"""
    Format the decoding interval of an ISD decoder when calling
    ``_repr_`` or ``_latex_``.

    EXAMPLES::

        sage: from sage.coding.information_set_decoder import _format_decoding_interval
        sage: _format_decoding_interval((0,3))
        'up to 3'
        sage: _format_decoding_interval((2,3))
        'between 2 and 3'
        sage: _format_decoding_interval((3,3))
        'exactly 3'
    """
    if decoding_interval[0] == 0:
        return "up to {0}".format(decoding_interval[1])
    if decoding_interval[0] == decoding_interval[1]:
        return "exactly {0}".format(decoding_interval[0])
    return "between {0} and {1}".format(decoding_interval[0], decoding_interval[1])


class InformationSetAlgorithm(SageObject):
    r"""
    Abstract class for algorithms for
    :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.

    To sub-class this class, override ``decode`` and ``calibrate``, and call
    the super constructor from ``__init__``.

    INPUT:

    - ``code`` -- A linear code for which to decode.

    - ``number_errors`` -- an integer, the maximal number of errors to accept
      as correct decoding. An interval can also be specified by giving a pair
      of integers, where both end values are taken to be in the interval.

    - ``algorithm_name`` -- A name for the specific ISD algorithm used (used
      for printing).

    - ``parameters`` -- (optional) A dictionary for setting the parameters of
      this ISD algorithm. Note that sanity checking this dictionary for the
      individual sub-classes should be done in the sub-class constructor.

    EXAMPLES::

        sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
        sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
        ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors

    A minimal working example of how to sub-class::

        sage: from sage.coding.information_set_decoder import InformationSetAlgorithm
        sage: from sage.coding.decoder import DecodingError
        sage: class MinimalISD(InformationSetAlgorithm):
        ....:     def __init__(self, code, decoding_interval):
        ....:         super(MinimalISD, self).__init__(code, decoding_interval, "MinimalISD")
        ....:     def calibrate(self):
        ....:         self._parameters = { } # calibrate parameters here
        ....:         self._time_estimate = 10.0 # calibrated time estimate
        ....:     def decode(self, r):
        ....:         # decoding algorithm here
        ....:         raise DecodingError("I failed")
        sage: MinimalISD(codes.GolayCode(GF(2)), (0,4))
        ISD Algorithm (MinimalISD) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
    """
    def __init__(self, code, decoding_interval, algorithm_name, parameters = None):
        r"""
        TESTS::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
        """
        self._code = code
        self._decoding_interval = decoding_interval
        self._algorithm_name = algorithm_name
        # NOTE: an empty dict passed as ``parameters`` is falsy and is
        # therefore treated the same as None (parameters not specified).
        if parameters:
            self._parameters = parameters
            self._parameters_specified = True
        else:
            self._parameters_specified = False

    def name(self):
        r"""
        Return the name of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,2))
            sage: A.name()
            'Lee-Brickell'
        """
        return self._algorithm_name

    def decode(self, r):
        r"""
        Decode a received word using this ISD decoding algorithm.

        Must be overridden by sub-classes.

        EXAMPLES::

            sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
                                     [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
                                     [0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
                                     [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
                                     [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
            sage: C = codes.LinearCode(M)
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (2,2))
            sage: r = vector(GF(2), [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            sage: A.decode(r)
            (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        """
        raise NotImplementedError

    def time_estimate(self):
        """
        Estimate for how long this ISD algorithm takes to perform a single
        decoding.

        The estimate is for a received word whose number of errors is within
        the decoding interval of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,2))
            sage: A.time_estimate() #random
            0.0008162108571427874
        """
        # Lazily computed: the first call triggers calibration.
        if not hasattr(self, "_time_estimate"):
            self.calibrate()
        return self._time_estimate

    def calibrate(self):
        """
        Uses test computations to estimate optimal values for any parameters
        this ISD algorithm may take.

        Must be overridden by sub-classes.

        If ``self._parameters_specified`` is ``False``, this method shall set
        ``self._parameters`` to the best parameters estimated. It shall always
        set ``self._time_estimate`` to the time estimate of using
        ``self._parameters``.

        EXAMPLES::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3))
            sage: A.calibrate()
            sage: A.parameters() #random
            {'search_size': 1}
        """
        raise NotImplementedError

    def code(self):
        r"""
        Return the code associated to this ISD algorithm.

        EXAMPLES::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3))
            sage: A.code()
            [24, 12, 8] Extended Golay code over GF(2)
        """
        return self._code

    def decoding_interval(self):
        r"""
        A pair of integers specifying the interval of number of errors this
        ISD algorithm will attempt to correct.

        The interval includes both end values.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,2))
            sage: A.decoding_interval()
            (0, 2)
        """
        return self._decoding_interval

    def parameters(self):
        """
        Return any parameters this ISD algorithm uses.

        If the parameters have not already been set, efficient values will
        first be calibrated and returned.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4), search_size=3)
            sage: A.parameters()
            {'search_size': 3}

        If not set, calibration will determine a sensible value::

            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: A.parameters() #random
            {'search_size': 1}
        """
        if not hasattr(self, "_parameters"):
            self.calibrate()
        return self._parameters

    def __eq__(self, other):
        r"""
        Tests equality between ISD algorithm objects.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: A == LeeBrickellISDAlgorithm(C, (0,4))
            True
            sage: A == LeeBrickellISDAlgorithm(C, (0,5))
            False
            sage: other_search = 1 if A.parameters()['search_size'] != 1 else 2
            sage: A == LeeBrickellISDAlgorithm(C, (0,4), search_size=other_search)
            False

        ISD Algorithm objects can be equal only if they have both calibrated
        the parameters, or if they both had it set and to the same value::

            sage: A2 = LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
            sage: A == A2
            False
            sage: A2 == LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
            True
        """
        return isinstance(other, self.__class__)\
                and self.code() == other.code()\
                and self.decoding_interval() == other.decoding_interval()\
                and self._parameters_specified == other._parameters_specified\
                and (not self._parameters_specified or
                     self.parameters() == other.parameters())

    def __hash__(self):
        r"""
        Returns the hash value of ``self``.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: hash(A) #random
            5884357732955478461
            sage: C2 = codes.GolayCode(GF(3))
            sage: A2 = LeeBrickellISDAlgorithm(C2, (0,4))
            sage: hash(A) != hash(A2)
            True
        """
        # Hashes the string representation; consistent with __eq__ only to
        # the extent that equal objects print identically.
        return hash(str(self))

    def _repr_(self):
        r"""
        Returns a string representation of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: A
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
        """
        # NOTE(review): the format string ends with a trailing space
        # ("... errors ") -- invisible in the doctest output; confirm intended.
        return "ISD Algorithm ({}) for {} decoding {} errors ".format(self._algorithm_name, self.code(), _format_decoding_interval(self.decoding_interval()))

    def _latex_(self):
        r"""
        Returns a latex representation of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: latex(A)
            \textnormal{ISD Algorithm (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 4 errors}
        """
        return "\\textnormal{{ISD Algorithm ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self._algorithm_name, self.code()._latex_(), _format_decoding_interval(self.decoding_interval()))


class LeeBrickellISDAlgorithm(InformationSetAlgorithm):
    r"""
    The Lee-Brickell algorithm for information-set decoding.

    For a description of the information-set decoding paradigm (ISD), see
    :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.

    This implements the Lee-Brickell variant of ISD, see [LB1988]_ for the
    original binary case, and [Pet2010]_ for the `q`-ary extension.

    Let `C` be a `[n, k]`-linear code over `GF(q)`, and let `r \in GF(q)^{n}`
    be a received word in a transmission. We seek the codeword whose Hamming
    distance from `r` is minimal. Let `p` and `w` be integers, such that
    `0\leq p\leq w`, Let `G` be a generator matrix of `C`, and for any set of
    indices `I`, we write `G_{I}` for the matrix formed by the columns of `G`
    indexed by `I`. The Lee-Brickell ISD loops the following until it is
    successful:

        1. Choose an information set `I` of `C`.
        2. Compute `r' = r - r_{I}\times G_I^{-1} \times G`
        3. Consider every size-`p` subset of `I`, `\{a_1, \dots, a_p\}`. For
           each `m = (m_1, \dots, m_p) \in GF(q)^{p}`, compute the error
           vector `e = r' - \sum_{i=1}^{p} m_i\times g_{a_i}`,
        4. If `e` has a Hamming weight at most `w`, return `r-e`.

    INPUT:

    - ``code`` -- A linear code for which to decode.

    - ``decoding_interval`` -- a pair of integers specifying an interval of
      number of errors to correct. Includes both end values.

    - ``search_size`` -- (optional) the size of subsets to use on step 3 of
      the algorithm as described above. Usually a small number. It has to be
      at most the largest allowed number of errors. A good choice will be
      approximated if this option is not set; see
      :meth:`sage.coding.LeeBrickellISDAlgorithm.calibrate` for details.

    EXAMPLES::

        sage: C = codes.GolayCode(GF(2))
        sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
        sage: A = LeeBrickellISDAlgorithm(C, (0,4)); A
        ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors

        sage: C = codes.GolayCode(GF(2))
        sage: A = LeeBrickellISDAlgorithm(C, (2,3)); A
        ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 3 errors
    """
    def __init__(self, code, decoding_interval, search_size = None):
        r"""
        TESTS:

        If ``search_size`` is not a positive integer, or is bigger than the
        decoding radius, an error will be raised::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=-1)
            Traceback (most recent call last):
            ...
            ValueError: The search size parameter has to be a positive integer

            sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=4)
            Traceback (most recent call last):
            ...
            ValueError: The search size parameter has to be at most the maximal number of allowed errors
        """
        if search_size is not None:
            # NOTE(review): the message says "positive integer" but the
            # check is ``search_size < 0``, so 0 is accepted -- confirm
            # whether 0 is deliberately allowed.
            if not isinstance(search_size, (Integer, int)) or search_size < 0:
                raise ValueError("The search size parameter has to be a positive integer")
            if search_size > decoding_interval[1]:
                raise ValueError("The search size parameter has to be at most"
                                 " the maximal number of allowed errors")
            super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell",
                parameters={ 'search_size': search_size })
            self._parameters_specified = True
        else:
            self._parameters_specified = False
            super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell")

    def decode(self, r):
        r"""
        The Lee-Brickell algorithm as described in the class doc.

        Note that either parameters must be given at construction time or
        :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.calibrate()`
        should be called before calling this method.

        INPUT:

        - `r` -- a received word, i.e. a vector in the ambient space of
          :meth:`decoder.Decoder.code`.

        OUTPUT: A codeword whose distance to `r` satisfies
        ``self.decoding_interval()``.

        EXAMPLES::

            sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
                                     [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
                                     [0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
                                     [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
                                     [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
            sage: C = codes.LinearCode(M)
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (2,2))
            sage: c = C.random_element()
            sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
            sage: r = Chan(c)
            sage: c_out = A.decode(r)
            sage: (r - c).hamming_weight() == 2
            True
        """
        import itertools
        from sage.misc.prandom import sample
        C = self.code()
        n, k = C.length(), C.dimension()
        tau = self.decoding_interval()
        p = self.parameters()['search_size']
        F = C.base_ring()
        G = C.generator_matrix()
        Fstar = F.list()[1:]
        # WARNING: loops forever if no codeword within the decoding
        # interval exists for ``r``.
        while True:
            # step 1.
            I = sample(range(n), k)
            Gi = G.matrix_from_columns(I)
            try:
                Gi_inv = Gi.inverse()
            except ZeroDivisionError:
                # I was not an information set
                continue
            Gt = Gi_inv * G
            #step 2.
            y = r - vector([r[i] for i in I]) * Gt
            g = Gt.rows()
            #step 3.
            for pi in range(p+1):
                for A in itertools.combinations(range(k), pi):
                    for m in itertools.product(Fstar, repeat=pi):
                        e = y - sum(m[i]*g[A[i]] for i in range(pi))
                        errs = e.hamming_weight()
                        if errs >= tau[0] and errs <= tau[1]:
                            return r - e

    def calibrate(self):
        r"""
        Run some test computations to estimate the optimal search size.

        Let `p` be the search size. We should simply choose `p` such that the
        average expected time is minimal. The algorithm succeeds when it
        chooses an information set with at least `k - p` correct positions,
        where `k` is the dimension of the code and `p` the search size. The
        expected number of trials we need before this occurs is:

        .. MATH::

            \binom{n}{k}/(\rho \sum_{i=0}^p \binom{n-\tau}{k-i} \binom{\tau}{i})

        Here `\rho` is the fraction of `k` subsets of indices which are
        information sets. If `T` is the average time for steps 1 and 2
        (including selecting `I` until an information set is found), while
        `P(i)` is the time for the body of the ``for``-loop in step 3 for `m`
        of weight `i`, then each information set trial takes roughly time
        `T + \sum_{i=0}^{p} P(i) \binom{k}{i} (q-1)^i`, where `\GF{q}` is the
        base field.

        The values `T` and `P` are here estimated by running a few test
        computations similar to those done by the decoding algorithm. We
        don't explicitly estimate `\rho`.

        OUTPUT: Does not output anything but sets private fields used by
        :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.parameters()`
        and
        :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.time_estimate()``.

        EXAMPLES::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
            sage: A.calibrate()
            sage: A.parameters() #random
            {'search_size': 1}
            sage: A.time_estimate() #random
            0.0008162108571427874

        If we specify the parameter at construction time, calibrate does not
        override this choice::

            sage: A = LeeBrickellISDAlgorithm(C, (0,3), search_size=2); A
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
            sage: A.parameters()
            {'search_size': 2}
            sage: A.calibrate()
            sage: A.parameters()
            {'search_size': 2}
            sage: A.time_estimate() #random
            0.0008162108571427874
        """
        from sage.matrix.special import random_matrix
        from sage.misc.prandom import sample, randint
        from sage.modules.free_module_element import random_vector
        from time import process_time
        C = self.code()
        G = C.generator_matrix()
        n, k = C.length(), C.dimension()
        tau = self.decoding_interval()[1]
        F = C.base_ring()
        q = F.cardinality()
        Fstar = F.list()[1:]
        def time_information_set_steps():
            # Time one successful information-set selection + inversion
            # (steps 1 and 2 of the algorithm).
            before = process_time()
            while True:
                I = sample(range(n), k)
                Gi = G.matrix_from_columns(I)
                try:
                    Gi_inv = Gi.inverse()
                except ZeroDivisionError:
                    continue
                return process_time() - before
        def time_search_loop(p):
            # Average the step-3 inner loop body over 100 random scalars.
            y = random_vector(F, n)
            g = random_matrix(F, p, n).rows()
            scalars = [  [ Fstar[randint(0,q-2)] for i in range(p) ]
                        for s in range(100) ]
            before = process_time()
            for m in scalars:
                e = y - sum(m[i]*g[i] for i in range(p))
            return (process_time() - before) / 100.
        T = sum([ time_information_set_steps() for s in range(5) ]) / 5.
        P = [ time_search_loop(p) for p in range(tau+1) ]
        def compute_estimate(p):
            # Expected total time = expected #trials * time per trial
            # (see the formula in the docstring above).
            iters = 1.* binomial(n, k)/ \
                sum( binomial(n-tau, k-i)*binomial(tau,i) for i in range(p+1) )
            estimate = iters*(T +\
                sum(P[pi] * (q-1)**pi * binomial(k, pi) for pi in range(p+1) ))
            return estimate
        if self._parameters_specified:
            self._time_estimate = compute_estimate(self._parameters['search_size'])
        else:
            self._calibrate_select([ compute_estimate(p) for p in range(tau+1) ])

    def _calibrate_select(self, estimates):
        r"""
        Internal method used by ``self.calibrate()``.

        Given the timing estimates, select the best parameter and set the
        appropriate private fields.

        INPUT:

        - `estimates` - list of time estimates, for the search size set to
          the index of the list entry.

        OUTPUT: None, but sets the private fields `self._parameters` and
        `self._time_estimate`.

        TESTS::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
            sage: A._calibrate_select([ 1.0, 2.0, 3.0, 0.5, 0.6, 1.0 ])
            sage: A._time_estimate
            0.500000000000000
            sage: A._parameters
            {'search_size': 3}
        """
        # argmin over the estimates; ties resolve to the smaller index.
        search_size = 0
        for p in range(1, len(estimates)):
            if estimates[p] < estimates[search_size]:
                search_size = p
        self._parameters = { 'search_size': search_size }
        self._time_estimate = estimates[search_size]


class LinearCodeInformationSetDecoder(Decoder):
    r"""
    Information-set decoder for any linear code.

    Information-set decoding is a probabilistic decoding strategy that
    essentially tries to guess `k` correct positions in the received word,
    where `k` is the dimension of the code. A codeword agreeing with the
    received word on the guessed position can easily be computed, and their
    difference is one possible error vector. A "correct" guess is assumed when
    this error vector has low Hamming weight.
The ISD strategy requires choosing how many errors is deemed acceptable. One choice could be `d/2`, where `d` is the minimum distance of the code, but sometimes `d` is not known, or sometimes more errors are expected. If one chooses anything above `d/2`, the algorithm does not guarantee to return a nearest codeword. This simple algorithm is not very efficient in itself, but there are numerous refinements to the strategy. Specifying which strategy to use among those that Sage knows is done using the ``algorithm`` keyword. If this is not set, an efficient choice will be made for you. The various ISD algorithms all need to select a number of parameters. If you choose a specific algorithm to use, you can pass these parameters as named parameters directly to this class' constructor. If you don't, efficient choices will be calibrated for you. .. WARNING:: If there is no codeword within the specified decoding distance, then the decoder may never terminate, or it may raise a :exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD algorithm used. INPUT: - ``code`` -- A linear code for which to decode. - ``number_errors`` -- an integer, the maximal number of errors to accept as correct decoding. An interval can also be specified by giving a pair of integers, where both end values are taken to be in the interval. - ``algorithm`` -- (optional) the string name of the ISD algorithm to employ. If this is not set, an appropriate one will be chosen. A constructed :class:`sage.coding.information_set_decoder.InformationSetAlgorithm` object may also be given. In this case ``number_errors`` must match that of the passed algorithm. - ``**kwargs`` -- (optional) any number of named arguments passed on to the ISD algorithm. Such are usually not required, and they can only be set if ``algorithm`` is set to a specific algorithm. See the documentation for each individual ISD algorithm class for information on any named arguments they may accept. 
The easiest way to access this documentation is to first construct the decoder without passing any named arguments, then accessing the ISD algorithm using :meth:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder.algorithm`, and then reading the `?` help on the constructed object. EXAMPLES: The principal way to access this class is through the :meth:`sage.code.linear_code.AbstractLinearCode.decoder` method:: sage: C = codes.GolayCode(GF(3)) sage: D = C.decoder("InformationSet", 2); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors You can specify which algorithm you wish to use, and you should do so in order to pass special parameters to it:: sage: C = codes.GolayCode(GF(3)) sage: D2 = C.decoder("InformationSet", 2, algorithm="Lee-Brickell", search_size=2); D2 Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors sage: D2.algorithm() ISD Algorithm (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors sage: D2.algorithm().parameters() {'search_size': 2} If you specify an algorithm which is not known, you get a friendly error message:: sage: C.decoder("InformationSet", 2, algorithm="NoSuchThing") Traceback (most recent call last): ... ValueError: Unknown ISD algorithm 'NoSuchThing'. The known algorithms are ['Lee-Brickell']. You can also construct an ISD algorithm separately and pass that. 
This is mostly useful if you write your own ISD algorithms:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0, 2)) sage: D = C.decoder("InformationSet", 2, algorithm=A); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors When passing an already constructed ISD algorithm, you can't also pass parameters to the ISD algorithm when constructing the decoder:: sage: C.decoder("InformationSet", 2, algorithm=A, search_size=2) Traceback (most recent call last): ... ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm We can also information-set decode non-binary codes:: sage: C = codes.GolayCode(GF(3)) sage: D = C.decoder("InformationSet", 2); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors There are two other ways to access this class:: sage: D = codes.decoders.LinearCodeInformationSetDecoder(C, 2); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder sage: D = LinearCodeInformationSetDecoder(C, 2); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors """ def __init__(self, code, number_errors, algorithm=None, **kwargs): r""" TESTS: ``number_errors`` has to be either a list of Integers/ints, a tuple of Integers/ints, or an Integer/int:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", "aa") Traceback (most recent call last): ... 
ValueError: number_errors should be an integer or a pair of integers If ``number_errors`` is passed as a list/tuple, it has to contain only two values, the first one being at most the second one:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", (4, 2)) Traceback (most recent call last): ... ValueError: number_errors should be a positive integer or a valid interval within the positive integers You cannot ask the decoder to correct more errors than the code length:: sage: D = C.decoder("InformationSet", 25) Traceback (most recent call last): ... ValueError: The provided number of errors should be at most the code's length If ``algorithm`` is not set, additional parameters cannot be passed to the ISD algorithm:: sage: D = C.decoder("InformationSet", 2, search_size=2) Traceback (most recent call last): ... ValueError: Additional arguments to an information-set decoder algorithm are only allowed if a specific algorithm is selected by setting the algorithm keyword If ``algorithm`` is set to a constructed ISD algorithm, additional parameters cannot be passed to the ISD algorithm:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0, 2)) sage: D = C.decoder("InformationSet", 2, A, search_size=3) Traceback (most recent call last): ... 
ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm If ``algorithm`` is set to a constructed :class:`sage.coding.information_set_decoder.InformationSetAlgorithm`, then ``number_errors`` must match that of the algorithm:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0, 2)) sage: D = C.decoder("InformationSet", 2, A); D Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors sage: D = C.decoder("InformationSet", (0,2), A); D Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors sage: D = C.decoder("InformationSet", 3, A); D Traceback (most recent call last): ... ValueError: number_errors must match that of the passed ISD algorithm """ if isinstance(number_errors, (Integer, int)): number_errors = (0, number_errors) if isinstance(number_errors, (tuple, list)) and len(number_errors) == 2 \ and number_errors[0] in ZZ and number_errors[1] in ZZ: if 0 > number_errors[0] or number_errors[0] > number_errors[1]: raise ValueError( "number_errors should be a positive integer or" " a valid interval within the positive integers") if number_errors[1] > code.length(): raise ValueError("The provided number of errors should be at" " most the code's length") else: raise ValueError("number_errors should be an integer or a pair of integers") self._number_errors = number_errors super(LinearCodeInformationSetDecoder, self).__init__( code, code.ambient_space(), code._default_encoder_name) if algorithm is None: if kwargs: raise ValueError("Additional arguments to an information-set decoder" " algorithm are only allowed if a specific" " algorithm is selected by setting the algorithm" " keyword") algorithm = "Lee-Brickell" algorithm_names = LinearCodeInformationSetDecoder.known_algorithms(dictionary=True) if isinstance(algorithm, 
InformationSetAlgorithm): if kwargs: raise ValueError("ISD algorithm arguments are not allowed when" " supplying a constructed ISD algorithm") if number_errors != algorithm.decoding_interval(): raise ValueError("number_errors must match that of the passed" " ISD algorithm") self._algorithm = algorithm elif algorithm in algorithm_names: self._algorithm = algorithm_names[algorithm](code, number_errors, **kwargs) else: raise ValueError("Unknown ISD algorithm '{}'." " The known algorithms are {}."\ .format(algorithm, sorted(algorithm_names))) _known_algorithms = { "Lee-Brickell": LeeBrickellISDAlgorithm } @staticmethod def known_algorithms(dictionary=False): r""" Return the list of ISD algorithms that Sage knows. Passing any of these to the constructor of :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder` will make the ISD decoder use that algorithm. INPUT: - ``dictionary`` - optional. If set to ``True``, return a ``dict`` mapping decoding algorithm name to its class. OUTPUT: a list of strings or a ``dict`` from string to ISD algorithm class. EXAMPLES:: sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder sage: sorted(LinearCodeInformationSetDecoder.known_algorithms()) ['Lee-Brickell'] """ if dictionary: return LinearCodeInformationSetDecoder._known_algorithms else: return LinearCodeInformationSetDecoder._known_algorithms.keys() def algorithm(self): r""" Return the ISD algorithm used by this ISD decoder. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", (2,4), "Lee-Brickell") sage: D.algorithm() ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 4 errors """ return self._algorithm def decode_to_code(self, r): r""" Decodes a received word with respect to the associated code of this decoder. .. 
WARNING:: If there is no codeword within the decoding radius of this decoder, this method may never terminate, or it may raise a :exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD algorithm used. INPUT: - ``r`` -- a vector in the ambient space of :meth:`decoder.Decoder.code`. OUTPUT: a codeword of :meth:`decoder.Decoder.code`. EXAMPLES:: sage: M = matrix(GF(2), [[1,0,0,0,0,0,1,0,1,0,1,1,0,0,1],\ [0,1,0,0,0,1,1,1,1,0,0,0,0,1,1],\ [0,0,1,0,0,0,0,1,0,1,1,1,1,1,0],\ [0,0,0,1,0,0,1,0,1,0,0,0,1,1,0],\ [0,0,0,0,1,0,0,0,1,0,1,1,0,1,0]]) sage: C = LinearCode(M) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2) sage: r = Chan(c) sage: D = C.decoder('InformationSet', 2) sage: c == D.decode_to_code(r) True Information-set decoding a non-binary code:: sage: C = codes.GolayCode(GF(3)); C [12, 6, 6] Extended Golay code over GF(3) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2) sage: r = Chan(c) sage: D = C.decoder('InformationSet', 2) sage: c == D.decode_to_code(r) True Let's take a bigger example, for which syndrome decoding or nearest-neighbor decoding would be infeasible: the `[59, 30]` Quadratic Residue code over `\GF{3}` has true minimum distance 17, so we can correct 8 errors:: sage: C = codes.QuadraticResidueCode(59, GF(3)) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2) sage: r = Chan(c) sage: D = C.decoder('InformationSet', 8) sage: c == D.decode_to_code(r) # long time True """ C = self.code() if r in C: return r return self.algorithm().decode(r) def decoding_radius(self): r""" Return the maximal number of errors this decoder can decode. 
EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", 2) sage: D.decoding_radius() 2 """ return self._number_errors[1] def decoding_interval(self): r""" A pair of integers specifying the interval of number of errors this decoder will attempt to correct. The interval includes both end values. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", 2) sage: D.decoding_interval() (0, 2) """ return self._number_errors def _repr_(self): r""" Returns a string representation of this decoding algorithm. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", 2) sage: D Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors """ return "Information-set decoder ({}) for {} decoding {} errors ".format(self.algorithm().name(), self.code(), _format_decoding_interval(self.decoding_interval())) def _latex_(self): r""" Returns a latex representation of this decoding algorithm. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: D = C.decoder("InformationSet", 2) sage: latex(D) \textnormal{Information-set decoder (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 2 errors} """ return "\\textnormal{{Information-set decoder ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self.algorithm().name(), self.code()._latex_(), _format_decoding_interval(self.decoding_interval())) LinearCodeInformationSetDecoder._decoder_type = {"hard-decision", "probabilistic", "not-always-closest", "bounded-distance", "might-fail"}
42,873
13,086
# coding: utf-8
"""
Download satellite imagery from the Kochi University weather archive.

Usage: python3 -m satelitedl --date 19900101
Author: Ryosuke Tomita
Date: 2021/11/02
"""
import os
from os.path import abspath, dirname, join
from datetime import datetime
from typing import Union

from .options import parse_args
from .scrapeimg import ScrapingImg

# Maps the length of the --date argument to the granularity it denotes.
_DATE_TYPES = {8: "day", 6: "month", 4: "year"}


def judge_date_type(date: str) -> str:
    """Return "day", "month" or "year" based on the length of ``date``.

    Args:
        date: 8-digit (YYYYMMDD), 6-digit (YYYYMM) or 4-digit (YYYY) string.

    Raises:
        ValueError: if ``date`` has none of the three recognised lengths.
            (ValueError subclasses the bare Exception raised previously, so
            existing callers remain compatible.)
    """
    try:
        return _DATE_TYPES[len(date)]
    except KeyError:
        raise ValueError(f'{date} is not valid value.')


def create_url(date: str) -> Union[str, None]:
    """Build the archive URL for an 8-digit ``date``.

    Returns ``None`` for strings that are not valid calendar days
    (e.g. "19900230"), so callers can simply skip them.
    """
    try:
        parsed = datetime.strptime(date, "%Y%m%d")
    except ValueError:
        return None
    base_url = "http://weather.is.kochi-u.ac.jp/sat/ALL/"
    return base_url + parsed.strftime("%Y/%m/%d/")


def _run_scrape_img(date: str):
    """Download every image listed on the archive page for ``date``.

    Does nothing when ``date`` is not a real calendar day.
    """
    url = create_url(date)
    if url is None:
        return
    scrape_img = ScrapingImg(url)
    scrape_img.fetch_img_url()
    scrape_img.download_file()


def _mk_save_dir(date_day: str, outdir: str) -> str:
    """Create (if needed) and return ``outdir/YYYY/MM/DD`` for ``date_day``."""
    year, month, day = date_day[0:4], date_day[4:6], date_day[6:8]
    day_dir = join(abspath(outdir), year, month, day)
    # makedirs creates all missing intermediate directories in one call
    # (replaces the previous per-level isdir/mkdir sequence).
    os.makedirs(day_dir, exist_ok=True)
    return day_dir


def _download_one_day(date_day: str, outdir: str):
    """Fetch one day's images into its save directory, then restore the cwd."""
    save_dir = _mk_save_dir(date_day, outdir)
    os.chdir(save_dir)
    _run_scrape_img(date_day)
    os.chdir(abspath(dirname(__file__)))


def use_scrapeimg(date: str, date_type: str, outdir: str):
    """Download imagery for the period described by ``date``/``date_type``.

    Args:
        date: 4-, 6- or 8-digit date string (YYYY, YYYYMM or YYYYMMDD).
        date_type: "year", "month" or "day" (see :func:`judge_date_type`).
        outdir: root directory under which YYYY/MM/DD folders are created.

    Unknown ``date_type`` values are ignored, matching the original behavior.
    Non-existent calendar days (e.g. Feb 31) still get a save directory but
    no download, exactly as before.
    """
    months = [f'{m:02}' for m in range(1, 13)]
    days = [f'{d:02}' for d in range(1, 32)]
    if date_type == "year":
        day_dates = [date + m + d for m in months for d in days]
    elif date_type == "month":
        day_dates = [date + d for d in days]
    elif date_type == "day":
        day_dates = [date]
    else:
        day_dates = []
    for date_day in day_dates:
        _download_one_day(date_day, outdir)


def main():
    """Entry point.

    1. Parse the command-line arguments.
    2. Scrape satellite pictures from
       "http://weather.is.kochi-u.ac.jp/sat/ALL/".
    """
    args = parse_args()
    date = args["date"]
    if date is None:
        raise ValueError('No argument about date.')
    outdir = args["outdir"]
    if outdir is None:
        outdir = dirname(__file__)
    date_type = judge_date_type(date)
    use_scrapeimg(date, date_type, outdir)


__all__ = ["main", "create_url", "judge_date_type", "use_scrapeimg"]
3,380
1,260
from .four_momentum import FourMomentum


class Particle:
    """A particle identified by its PDG code, with an attached four-momentum.

    On construction the four-momentum's basis is set from the momentum's
    Cartesian components plus the rest mass looked up from the PDG code.
    """

    # Rest masses (GeV) for the PDG codes this project handles.
    # NOTE(review): 0.5e-3 approximates the electron mass (0.511 MeV) and
    # 106e-3 the muon mass; values kept as-is to preserve behavior.
    _MASSES = {11: 0.5e-3, 13: 106e-3}
    # Fallback mass returned for any PDG code not in the table.
    _DEFAULT_MASS = 1.0

    def __init__(self, pdg_id, momentum):
        self.pdg_id = pdg_id
        self.momentum = momentum
        self.momentum.set_basis(
            (momentum.px, momentum.py, momentum.pz, Particle.get_mass(pdg_id)),
            'x,y,z,m'
        )

    def mass(self):
        """Return the mass component stored on the four-momentum."""
        return self.momentum.m

    @staticmethod
    def get_mass(pdg_id):
        """Return the rest mass (GeV) for ``pdg_id``.

        The sign of the code (particle vs. antiparticle) is ignored; unknown
        codes fall back to 1.0, matching the original if/elif chain.
        """
        return Particle._MASSES.get(abs(pdg_id), Particle._DEFAULT_MASS)
602
225
from icolos.utils.enums.step_enums import StepBaseEnum, StepGromacsEnum
from icolos.utils.enums.program_parameters import GromacsEnum
from icolos.core.workflow_steps.gromacs.base import StepGromacsBase
from icolos.utils.execute_external.gromacs import GromacsExecutor
from pydantic import BaseModel
from icolos.core.workflow_steps.step import _LE
import os

# Enum singletons holding the GROMACS program names and the step/field keys
# referenced throughout this module.
_GE = GromacsEnum()
_SGE = StepGromacsEnum()
_SBE = StepBaseEnum


class StepGMXGenion(StepGromacsBase, BaseModel):
    """
    Wrapper for gmx genion

    Replaces solvent molecules with ions in a solvated system, then rebuilds
    the index file so later steps see up-to-date index groups.
    """

    def __init__(self, **data):
        """Initialise the step and attach a GROMACS command-line executor."""
        super().__init__(**data)
        self._initialize_backend(executor=GromacsExecutor)
        # Fail early if the gmx binary is not available on this host.
        self._check_backend_availability()

    def execute(self):
        """Run ``gmx genion`` in a scratch dir, then regenerate ``index.ndx``.

        Inputs (topology, tpr) are pulled from the step's generic data by file
        extension; stdout of each external call is logged at DEBUG level.
        """
        tmp_dir = self._make_tmpdir()
        self._write_input_files(tmp_dir)
        arguments = self._parse_arguments(
            {
                # input file paths are handled internally
                "-o": _SGE.STD_STRUCTURE,
                "-p": self.data.generic.get_argument_by_extension(_SGE.FIELD_KEY_TOPOL),
                "-s": self.data.generic.get_argument_by_extension(_SGE.FIELD_KEY_TPR),
            }
        )
        # genion prompts interactively for the group to replace; the answer is
        # supplied through pipe_input built from the step's settings.
        result = self._backend_executor.execute(
            command=_GE.GENION,
            arguments=arguments,
            location=tmp_dir,
            pipe_input=self.construct_pipe_arguments(
                tmp_dir, self.settings.additional[_SBE.PIPE_INPUT]
            ),
        )
        for line in result.stdout.split("\n"):
            self._logger_blank.log(line, _LE.DEBUG)
        self._logger.log(
            f"Completed execution for {self.step_id} successfully", _LE.INFO
        )

        # this is the last structural change to the topology in a regular gromacs setup,
        # update the index groups here
        make_ndx_args = ["-f", _SGE.STD_STRUCTURE, "-o", _SGE.STD_INDEX]
        index_files = [f for f in os.listdir(tmp_dir) if f.endswith(".ndx")]
        # remove any existing index files so make_ndx starts from a clean slate
        for f in index_files:
            self._remove_temporary(os.path.join(tmp_dir, f))
        # generate new index file
        # NOTE(review): "1 | 12" merges make_ndx groups 1 and 12 — presumably
        # protein plus the freshly added ions; confirm the group numbering
        # holds for every system this step is used on.
        result = self._backend_executor.execute(
            command=_GE.MAKE_NDX,
            arguments=make_ndx_args,
            location=tmp_dir,
            check=True,
            pipe_input='echo -e "1 | 12 \nq"',
        )
        for line in result.stdout.split("\n"):
            self._logger_blank.log(line, _LE.DEBUG)
        self._logger.log('Added index group to "index.ndx"', _LE.DEBUG)
        # Collect produced files into the step's output, then clean up.
        self._parse_output(tmp_dir)
        self._remove_temporary(tmp_dir)
2,572
811
import math

import tensorflow as tf

from mayo.log import log
from mayo.util import (
    Percent, memoize_method, memoize_property, object_from_params)
from mayo.session.base import SessionBase


class Train(SessionBase):
    """Training session: builds loss/gradient ops and drives the train loop.

    Multi-GPU ("tower") gradients are averaged before being applied by a
    single optimizer.  Checkpointing and epoch accounting are handled in
    ``_iteration``.
    """

    mode = 'train'

    def __init__(self, config):
        super().__init__(config)
        # When False, ``once`` only evaluates epoch counters without training.
        self._run_train_ops = True
        self._setup_train_operation()
        self._init()
        self._checkpoint_epoch = ''

    @memoize_property
    def learning_rate(self):
        """Instantiate the configured learning-rate schedule (memoized)."""
        params = self.config.train.learning_rate
        lr_class, params = object_from_params(params)
        if lr_class is tf.train.piecewise_constant:
            # `tf.train.piecewise_constant` uses argument name 'x' instead
            # just to make life more difficult
            step_name = 'x'
        else:
            step_name = 'global_step'
        # NOTE(review): the schedule is driven by epochs, not global steps —
        # confirm this is intentional for schedules expecting step counts.
        params[step_name] = self.num_epochs
        log.debug(
            'Using learning rate {!r} with params {}.'
            .format(lr_class.__name__, params))
        return lr_class(**params)

    @memoize_property
    def optimizer(self):
        """Instantiate the configured optimizer bound to ``learning_rate``."""
        params = self.config.train.optimizer
        optimizer_class, params = object_from_params(params)
        log.debug('Using optimizer {!r}.'.format(optimizer_class.__name__))
        return optimizer_class(self.learning_rate, **params)

    @staticmethod
    def _average_gradients(tower_grads):
        """Average per-tower ``(grad, var)`` lists into a single list.

        A single tower is returned untouched; a None gradient means the
        variable is disconnected from the loss and is treated as an error.
        """
        tower_grads = list(tower_grads)
        if len(tower_grads) == 1:
            return tower_grads[0]
        average_grads = []
        for grad_and_vars in zip(*tower_grads):
            grads = []
            for g, v in grad_and_vars:
                # add 0 dimension to the gradients to represent the tower
                if g is None:
                    raise ValueError(
                        'Gradient for variable {} is None, please check '
                        'connection.'.format(v))
                g = tf.expand_dims(g, 0)
                grads.append(g)
            # average over the 'tower' dimension.
            grad = tf.concat(axis=0, values=grads)
            grad = tf.reduce_mean(grad, 0)
            # simply return the first tower's pointer to the Variable
            v = grad_and_vars[0][1]
            grad_and_var = (grad, v)
            average_grads.append(grad_and_var)
        return average_grads

    @staticmethod
    def _loss_formatter(key, name):
        """Return a progress-bar formatter showing mean±std of loss ``key``.

        Raises ValueError from inside the formatter when the mean is NaN,
        so divergence aborts training with a clear message.
        """
        def formatter(estimator):
            loss_mean, loss_std = estimator.get_mean_std(key)
            if math.isnan(loss_mean):
                raise ValueError('Model diverged with a nan-valued loss.')
            loss_std = '±{}'.format(Percent(loss_std / loss_mean))
            return '{}: {:10f}{:5}'.format(name, loss_mean, loss_std)
        return formatter

    @memoize_method
    def _losses_and_gradients(self):
        """Build per-tower losses (task loss + regularization) and gradients.

        Returns a tuple ``(tower_losses, averaged_grads)``.
        """
        formatter = self._loss_formatter('regularization', 'regu')
        regularization = self.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES, first_gpu=True)
        if regularization:
            self.estimator.register(
                tf.add_n(regularization), 'regularization',
                formatter=formatter)

        def gradient(net, prediction, truth):
            # Total loss per tower = task loss + (shared) regularization terms.
            loss = [self.task.train(net, prediction, truth)] + regularization
            loss = tf.add_n(loss)
            return loss, self.optimizer.compute_gradients(loss)
        tower_losses, tower_grads = zip(*self.task.map(gradient))
        return tower_losses, self._average_gradients(tower_grads)

    def _setup_train_operation(self):
        """Assemble the dict of ops run each step (apply-grads, updates, extra)."""
        ops = {}
        self._losses, gradients = self._losses_and_gradients()
        self._mean_loss = tf.reduce_mean(self._losses)
        ops['app_grad'] = self.optimizer.apply_gradients(gradients)
        # update ops
        update_ops = list(self.get_collection(tf.GraphKeys.UPDATE_OPS))
        ops['update'] = tf.group(*update_ops, name='update')
        log.debug('Using update operations: {}'.format(update_ops))
        log.debug('Using training operations: {}'.format(ops))
        if self.extra_train_ops:
            ops['extra'] = self.extra_train_ops
        self._train_op = ops

    def _init(self):
        """Restore from checkpoint and register the mean-loss progress metric."""
        self.load_checkpoint(self.config.system.checkpoint.load)
        formatter = self._loss_formatter('loss', 'loss')
        self.estimator.register(self._mean_loss, 'loss', formatter=formatter)

    def reset_num_epochs(self):
        """Reset the epoch counter and related change-tracking state."""
        log.info('Reseting number of training epochs of the model...')
        # NOTE(review): ``imgs_seen`` is presumably a variable defined by
        # SessionBase tracking processed images — confirm.
        self.run(self.imgs_seen.initializer)
        self.change.reset('checkpoint.epoch')
        self.change.reset('step')

    def once(self):
        """Run one training step; return the (fractional) epoch count."""
        train_op = self._train_op if self._run_train_ops else []
        tasks = [train_op, self.num_epochs]
        _, num_epochs = self.run(tasks, batch=True)
        return num_epochs

    def overriders_assign(self):
        log.info('Assigning overridden values of parameters to parameters...')
        self._overriders_call('assign')

    def overriders_update(self):
        log.info('Updating overrider internal variables...')
        self._overriders_call('update')

    def overriders_reset(self):
        log.info('Resetting overriders internal variables...')
        self._overriders_call('reset')

    def _iteration(self, max_epochs=None):
        """Run one step; checkpoint on schedule.

        Returns False once ``max_epochs`` is reached (saving a final
        checkpoint if one has not just been written), True otherwise.
        """
        system = self.config.system
        epoch = self.once()
        floor_epoch = math.floor(epoch)
        cp_interval = system.checkpoint.get('save.interval', 0)
        if self.change.every('checkpoint.epoch', floor_epoch, cp_interval):
            log.info(
                'Saving checkpoint at epoch {}...'.format(epoch), update=True)
            with log.demote():
                self.save_checkpoint(floor_epoch)
            self._checkpoint_epoch = floor_epoch
        max_epochs = max_epochs or system.max_epochs
        if max_epochs and epoch >= max_epochs:
            log.info(
                'Maximum epoch count {} reached.'.format(max_epochs))
            if self._checkpoint_epoch and floor_epoch > self._checkpoint_epoch:
                log.info('Saving final checkpoint...')
                self.save_checkpoint(floor_epoch)
            return False
        return True

    def train(self, max_epochs=None):
        """Main training loop; Ctrl-C optionally saves a 'latest' checkpoint."""
        # final debug outputs
        lr = self.run(self.learning_rate)
        log.info('Training start with a learning rate {}.'.format(lr))
        try:
            # train iterations
            while self._iteration(max_epochs=max_epochs):
                pass
        except KeyboardInterrupt:
            log.info('Stopped.')
            save = self.config.system.checkpoint.get('save', {})
            if save:
                countdown = save.get('countdown', 0)
                if log.countdown('Saving checkpoint', countdown):
                    self.save_checkpoint('latest')
6,811
2,039
def is_lazy_user(user):
    """
    Return True if the passed user is a lazy user.
    """
    # Anonymous users are not lazy.
    if user.is_anonymous:
        return False

    # Check the user backend. If the lazy signup backend
    # authenticated them, then the user is lazy.
    backend = getattr(user, 'backend', None)
    if backend == 'lazysignup.backends.LazySignupBackend':
        return True

    # Otherwise, we have to fall back to checking the database.
    # ``exists()`` lets the database stop at the first matching row instead
    # of counting all of them, and already returns a bool.
    from lazysignup.models import LazyUser
    return LazyUser.objects.filter(user=user).exists()
570
171
# -*- coding: utf-8 -*-
# This script copies a directory of OBS-tQ or OBS-tN markdown files to a second location.
# It cleans up the files in these ways:
#    Ensures blank lines surrounding markdown headers.
#    Fixes links of this form [[:en:...]]
#    Removes leading spaces.

# Global variables
source_dir = r'C:\DCS\Russian\OBS-TN\content'
target_dir = r'C:\DCS\Russian\ru_obs-tn.work\content'   # path should end with "\content"
resource_type = 'obs-tn'
language_code = 'ru'

import re
import io
import os
import sys
import convert2md


# Returns path of .md file in target directory, creating the story folder
# if it does not exist yet.
def makeMdPath(story, fname):
    mdPath = os.path.join(target_dir, story)
    os.makedirs(mdPath, exist_ok=True)
    return os.path.join(mdPath, fname)


# Strips the source_dir prefix from a path, for compact log/reporting output.
def shortname(longpath):
    shortname = longpath
    if source_dir in longpath:
        shortname = longpath[len(source_dir)+1:]
    return shortname


# Converts .md file in fullpath location to .md file in target dir.
def convertFile(story, fname, fullpath):
    if os.access(fullpath, os.F_OK):
        mdPath = makeMdPath(story, fname)
        convert2md.md2md(fullpath, mdPath, language_code, shortname)


# This method is called to convert the text files in the specified story folder.
# It renames files that have only a single digit in the name (e.g. "7.md"
# becomes "07.md") so story files sort correctly, then converts each
# two-digit file except the story intro "00.md".
# Regex patterns are raw strings: '\d' in a plain string triggers a
# DeprecationWarning for the invalid escape sequence.
def convertStory(story, fullpath):
    for fname in os.listdir(fullpath):
        if re.match(r'\d\.md', fname):
            goodPath = os.path.join(fullpath, '0' + fname)
            if not os.path.exists(goodPath):
                badPath = os.path.join(fullpath, fname)
                os.rename(badPath, goodPath)
                fname = '0' + fname
        if re.match(r'\d\d\.md', fname) and fname != '00.md':
            convertFile(story, fname, os.path.join(fullpath, fname))


# It looks like OBS-sQ repos consist only of 50 .md files in one folder
def convertSQfolder(folder):
    for fname in os.listdir(folder):
        if re.match(r'\d\d\.md', fname):
            convertFile("", fname, os.path.join(folder, fname))


# Converts the stories contained in the specified folder
def convert(source_dir):
    os.makedirs(target_dir, exist_ok=True)
    if resource_type == 'obs-sq':
        convertSQfolder(source_dir)
    else:
        for item in os.listdir(source_dir):
            folder = os.path.join(source_dir, item)
            if os.path.isdir(folder):
                convertStory(item, folder)


# Processes each directory and its files one at a time
if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] != 'hard-coded-path':
        source_dir = sys.argv[1]

    if os.path.isdir(source_dir):
        convert(source_dir)
        print("\nDone.")
2,753
917
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 20 23:06:49 2017

@author: Grant

An implementation of the Thomas algorithm in Python, using just-in-time
compiling from numba for additional speed
"""
import numpy as np
from numba import njit, f8


def solve(A, d):
    '''Helper function for the Thomas algorithm.  Breaks the matrix into its
    tridiagonal bands for easier processing by the algorithm.

    INPUTS
    =======
    A: (n, n) numpy array, expected to be tridiagonal
    d: length-n right-hand side of the system A x = d

    RETURNS
    ========
    The solution x of the tridiagonal system A x = d.

    Raises an Exception when A has entries outside its three bands.
    '''
    # pass numba float64 dtype np.arrays to the solve function - need to
    # perform this step to allow for nopython execution of thomas algorithm
    # which yields maximum speed
    a = f8(np.diagonal(A, offset=0))     # main diagonal, length n
    b = f8(np.diagonal(A, offset=1))     # superdiagonal, length n-1
    c = f8(np.diagonal(A, offset=-1))    # subdiagonal, length n-1
    dfloat = f8(d)

    # Rebuild a tridiagonal matrix from the extracted bands; if it is not
    # 'close enough' to A, then A was not tridiagonal.
    D = np.diag(a, 0) + np.diag(b, 1) + np.diag(c, -1)
    if not np.allclose(A, D):
        raise Exception('The given A is not tridiagonal')

    # pass to thomas algorithm solver
    return solve_body(a, b, c, dfloat)


# chose to use njit decorator to force nopython implementation and
# get faster speed. Downside is I lose flexibility in input of solver, must
# wrap in another function which will format data correctly
@njit
def solve_body(a, b, c, d):
    ''' Thomas algorithm to solve a tridiagonal system of equations

    INPUTS
    =======
    a: numpy array
       the diagonal entries
    b: numpy array
       the superdiagonal entries
    c: numpy array
       the subdiagonal entries
    d: numpy array
       the right-hand side of the system of equations

    RETURNS
    ========
    The solution for the given tri-diagonal system of equations.
    '''
    n = len(a)  # number of equations in the system

    # forward sweep: eliminate the subdiagonal, producing modified
    # pivots alpha and right-hand side beta
    alpha = np.zeros(n)
    beta = np.zeros(n)
    alpha[0] = a[0]
    beta[0] = d[0]
    for i in range(1, n, 1):
        # row i couples to row i-1 through c[i-1] (its subdiagonal entry)
        # and b[i-1] (the previous row's superdiagonal entry); the Python
        # band arrays are indexed 0..n-2
        alpha[i] = a[i] - (b[i-1] * c[i-1]) / alpha[i-1]
        beta[i] = d[i] - (beta[i-1] * c[i-1]) / alpha[i-1]

    # back substitution, starting from the last unknown
    x = np.zeros(n)
    x[n-1] = beta[n-1] / alpha[n-1]
    for j in range(n-2, -1, -1):
        # BUG FIX: row j couples to x[j+1] through its *own* superdiagonal
        # entry b[j].  The previous code used b[j-1], which misaligned every
        # row and, for j == 0, silently wrapped around to b[-1] (the last
        # superdiagonal entry), producing wrong solutions for n >= 3.
        x[j] = (beta[j] - b[j] * x[j+1]) / alpha[j]
    return x
2,620
970
from base64 import b64encode

import pytest


@pytest.mark.django_db
def test_login_view(user, client, settings):
    """Test that the login view can login and logout."""
    login_url = f"/{settings.LOGIN_URL.strip('/')}/"

    # Django only persists a password hash, so assign a raw password we
    # know before attempting to authenticate.
    raw_password = "A test password"
    user.set_password(raw_password)
    user.save()

    def assert_authenticated(resp):
        # Shared checks for any response that should identify the user.
        assert resp.status_code == 200
        assert resp.data["username"] == user.username
        assert resp.data["email"] == user.email

    # Log in using valid HTTP Basic credentials.
    token = b64encode(f"{user.username}:{raw_password}".encode()).decode()
    client.credentials(HTTP_AUTHORIZATION=f"Basic {token}")
    assert_authenticated(client.post(login_url))

    # The session alone (no Authorization header) should now identify us.
    client.credentials()
    assert_authenticated(client.get(login_url))

    # Logging out via DELETE clears the user fields in the response.
    logout_resp = client.delete(login_url)
    assert logout_resp.status_code == 200
    assert not logout_resp.data["username"]
    assert not logout_resp.data["email"]

    # With neither credentials nor a session, every verb is rejected.
    for request in (client.post, client.get, client.delete):
        assert request(login_url).status_code == 403
1,444
441
from datetime import time
from vnpy.app.cta_strategy import (
    CtaTemplate,
    StopOrder,
    TickData,
    BarData,
    TradeData,
    OrderData,
    BarGenerator,
    ArrayManager
)
from vnpy.app.cta_strategy.base import (
    EngineType,
    STOPORDER_PREFIX,
    StopOrder,
    StopOrderStatus,
)
from vnpy.app.cta_strategy.TSMtools import TSMArrayManager
import numpy as np


class TSMyoPolyfitStrategy(CtaTemplate):
    """Intraday CTA strategy that fits a polynomial to recent prices and
    trades on its first/second derivatives (acceleration of the trend).
    Positions are force-closed before the end of the session."""

    author = "TheSuperMyo"

    # Intraday trading: force-flat after this time.
    exit_time = time(hour=14, minute=54)
    # Session boundaries for the different markets this may run on.
    open_time_night = time(hour=21, minute=0)     # commodity night session
    open_time_day_1 = time(hour=9, minute=0)      # commodities, day session
    open_time_day_2 = time(hour=9, minute=30)     # stock-index futures
    close_time_day = time(hour=15, minute=0)      # commodities / index (except interest-rate futures)
    close_time_night_1 = time(hour=23, minute=0)  # other night-session commodities
    close_time_night_2 = time(hour=1, minute=0)   # industrial metals
    close_time_night_3 = time(hour=2, minute=30)  # gold / silver / crude oil
    break_time_start_1 = time(hour=10, minute=15) # commodity mid-morning break starts
    break_time_start_2 = time(hour=11, minute=30) # lunch break (all markets) starts
    break_time_end_1 = time(hour=10, minute=30)   # commodity mid-morning break ends
    break_time_end_2 = time(hour=13, minute=0)    # index afternoon session opens
    break_time_end_3 = time(hour=13, minute=30)   # commodity afternoon session opens

    poly_entry_1 = 0.5    # entry threshold on the 1st derivative
    poly_entry_2 = 0.06   # entry threshold on the 2nd derivative
    poly_out_1 = 0.2      # exit threshold on the 1st derivative
    poly_out_2 = 0        # exit threshold on the 2nd derivative
    fit_bar = 3           # bar period (minutes) fed to the fit
    setup_fit = 85        # minutes of data required before fitting starts
    end_window = 95       # no new entries after this many minutes
    trailing_stop = 0.45  # trailing stop, in percent
    fixed_size = 1        # fixed lot size per order

    bar_counter = 0       # minutes elapsed in the current trading day
    poly_1 = 0            # latest fitted 1st derivative
    poly_2 = 0            # latest fitted 2nd derivative
    long_entry = 0        # signal flags (0/1), consumed by on_bar
    short_entry = 0
    long_exit = 0
    short_exit = 0
    stop_long = 0         # trailing-stop price for a long position
    stop_short = 0        # trailing-stop price for a short position
    hold_high = 0         # highest high seen while holding a long
    hold_low = 0          # lowest price seen while holding a short

    parameters = ['poly_entry_1', 'poly_entry_2', 'poly_out_1', 'poly_out_2', 'end_window', 'setup_fit', 'fit_bar', 'trailing_stop', 'fixed_size']
    variables = ['bar_counter', 'poly_1', 'poly_2', 'stop_long', 'stop_short']

    def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
        """"""
        super(TSMyoPolyfitStrategy, self).__init__(
            cta_engine, strategy_name, vt_symbol, setting
        )
        # Aggregate 1-minute bars into fit_bar-minute bars for on_fit_bar.
        self.bg = BarGenerator(self.on_bar, self.fit_bar, self.on_fit_bar)
        # Index futures trade 240 minutes per day.
        self.am = TSMArrayManager(240)
        # Strategy-local order bookkeeping.
        self.active_orderids = []
        self.bars = []

    def on_init(self):
        """
        Callback when strategy is inited.
        """
        self.write_log("策略初始化")
        # Past days' data are not used by the signal itself.
        self.load_bar(10)

    def on_start(self):
        """
        Callback when strategy is started.
        """
        self.write_log("策略启动")

    def on_stop(self):
        """
        Callback when strategy is stopped.
        """
        self.write_log("策略停止")

    def tick_filter(self, tick: TickData):
        """
        Filter out ticks that arrive outside normal trading hours
        (stock-index session: 9:30-11:30, 13:00-15:00).
        """
        tick_time = tick.datetime.time()
        if tick_time < self.open_time_day_2:
            return False
        if tick_time > self.break_time_start_2 and tick_time < self.break_time_end_2:
            return False
        if tick_time > self.close_time_day:
            return False
        return True

    def on_tick(self, tick: TickData):
        """
        Callback of new tick data update.
        """
        if not self.tick_filter(tick):
            return
        self.bg.update_tick(tick)

    def on_bar(self, bar: BarData):
        """
        1. Count the elapsed minutes of the day.
        2. Place orders according to the current signal flags.
        """
        self.bar_counter += 1
        self.bg.update_bar(bar)
        # Cancel all working orders before re-quoting on this bar.
        self.cancel_all()

        if self.pos == 0 and bar.datetime.time() < self.exit_time:
            if self.long_entry:
                # Enter long at the close price.
                if self.active_orderids:
                    self.write_log("撤单不干净,无法挂单")
                    return
                orderids = self.buy(bar.close_price, self.fixed_size, False, True)
                self.active_orderids.extend(orderids)
            if self.short_entry:
                # Enter short at the close price.
                if self.active_orderids:
                    self.write_log("撤单不干净,无法挂单")
                    return
                orderids = self.short(bar.close_price, self.fixed_size, False, True)
                self.active_orderids.extend(orderids)

        if self.pos > 0:
            # Track the post-entry high and trail the long stop below it.
            self.hold_high = max(self.hold_high, bar.high_price)
            self.stop_long = self.hold_high * (1 - self.trailing_stop / 100)
            if self.long_exit or bar.datetime.time() > self.exit_time:
                # Exit long on signal, or force-flat at end of day.
                if self.active_orderids:
                    self.write_log("撤单不干净,无法挂单")
                    return
                orderids = self.sell(bar.close_price, self.fixed_size, False, True)
                self.active_orderids.extend(orderids)
            else:
                # Otherwise keep a trailing stop order to exit the long.
                if self.active_orderids:
                    self.write_log("撤单不干净,无法挂单")
                    return
                orderids = self.sell(self.stop_long, self.fixed_size, True, True)
                self.active_orderids.extend(orderids)

        if self.pos < 0:
            # NOTE(review): this trails off bar.high_price; a low-water mark
            # would normally use bar.low_price — confirm this is intended.
            self.hold_low = min(self.hold_low, bar.high_price)
            self.stop_short = self.hold_low * (1 + self.trailing_stop / 100)
            if self.short_exit or bar.datetime.time() > self.exit_time:
                # Exit short on signal, or force-flat at end of day.
                if self.active_orderids:
                    self.write_log("撤单不干净,无法挂单")
                    return
                orderids = self.cover(bar.close_price, self.fixed_size, False, True)
                self.active_orderids.extend(orderids)
            else:
                # Otherwise keep a trailing stop order to exit the short.
                if self.active_orderids:
                    self.write_log("撤单不干净,无法挂单")
                    return
                orderids = self.cover(self.stop_short, self.fixed_size, True, True)
                self.active_orderids.extend(orderids)

    def on_fit_bar(self, bar: BarData):
        """
        1. Reset daily state at each day's open.
        2. Fit the price series and derive 1st/2nd derivative signals.
        """
        # for backtest
        # self.cta_engine.output(f"{bar.datetime.time()}")
        # self.write_log(f"{bar.datetime.time()}")
        am = self.am
        am.update_bar(bar)

        # Keep a 2-bar window so we can detect the day change via the
        # previous bar's date.
        self.bars.append(bar)
        if len(self.bars) <= 2:
            return
        else:
            self.bars.pop(0)
        last_bar = self.bars[-2]

        if last_bar.datetime.date() != bar.datetime.date():
            # First fit_bar-minute bar of a new day: reset counters/signals.
            self.bar_counter = self.fit_bar
            self.long_entry = 0
            self.short_entry = 0
            self.long_exit = 0
            self.short_exit = 0

        # Wait until enough minutes have accumulated to fit.
        if self.bar_counter < self.setup_fit:
            return

        # Fit over all bars seen so far today.
        self.poly_1, self.poly_2 = am.polyfit(int((self.bar_counter) / self.fit_bar))

        if self.pos == 0 and self.bar_counter < self.end_window:
            if self.poly_1 > self.poly_entry_1 and self.poly_2 > self.poly_entry_2:
                # Accelerating rise: long-entry signal.
                self.long_entry = 1
                self.short_entry = 0
                self.long_exit = 0
                self.short_exit = 0
            if self.poly_1 < -self.poly_entry_1 and self.poly_2 < -self.poly_entry_2:
                # Accelerating fall: short-entry signal.
                self.long_entry = 0
                self.short_entry = 1
                self.long_exit = 0
                self.short_exit = 0

        if self.pos > 0:
            if self.poly_1 < self.poly_out_1 or self.poly_2 < self.poly_out_2:
                # Rise losing momentum: long-exit signal.
                self.long_entry = 0
                self.short_entry = 0
                self.long_exit = 1
                self.short_exit = 0

        if self.pos < 0:
            if self.poly_1 > -self.poly_out_1 or self.poly_2 > -self.poly_out_2:
                # Fall losing momentum: short-exit signal.
                self.long_entry = 0
                self.short_entry = 0
                self.long_exit = 0
                self.short_exit = 1

    def on_order(self, order: OrderData):
        """
        Callback of new order data update.
        """
        # Drop filled or cancelled orders from the active list.
        if not order.is_active() and order.vt_orderid in self.active_orderids:
            self.active_orderids.remove(order.vt_orderid)

    def on_trade(self, trade: TradeData):
        """
        Callback of new trade data update.
        """
        # Email notification of the fill.
        self.send_email(f"{trade.vt_symbol}在{trade.time}成交,价格{trade.price},方向{trade.direction}{trade.offset},数量{trade.volume}")
        # A fill consumes the pending signal flags.
        self.long_entry = 0
        self.short_entry = 0
        self.long_exit = 0
        self.short_exit = 0
        self.put_event()

    def on_stop_order(self, stop_order: StopOrder):
        """
        Callback of stop order update.
        """
        # Freshly created local stop order: nothing to do yet.
        if stop_order.status == StopOrderStatus.WAITING:
            return
        # Cancelled local stop order: remove from the active list.
        if stop_order.status == StopOrderStatus.CANCELLED:
            if stop_order.stop_orderid in self.active_orderids:
                self.active_orderids.remove(stop_order.stop_orderid)
        # Triggered local stop order: swap the stop id for its limit-order ids.
        if stop_order.status == StopOrderStatus.TRIGGERED:
            if stop_order.stop_orderid in self.active_orderids:
                self.active_orderids.remove(stop_order.stop_orderid)
            self.active_orderids.extend(stop_order.vt_orderids)
            # Cancel any other outstanding stop orders.
            for other_orderids in self.active_orderids:
                if other_orderids.startswith(STOPORDER_PREFIX):
                    self.cancel_order(other_orderids)
9,445
3,644
# run.py
"""
Script for running a specific pipeline from a given yaml config file
"""
import os
import argparse
from importlib import import_module
import numpy as np
import time
import pandas as pd


def import_from_path(path_to_module, obj_name=None):
    """
    Import an object from a module based on the filepath of the module and
    the string name of the object.

    If obj_name is None, return the module itself instead.
    """
    # BUGFIX: the previous str.strip(".py") removed *characters* from both
    # ends, so e.g. "copy.py" became "co".  Remove the ".py" suffix only,
    # then convert the path separators to a dotted module name.
    if path_to_module.endswith(".py"):
        path_to_module = path_to_module[:-3]
    module_name = path_to_module.replace("/", ".")
    module = import_module(module_name)
    if obj_name is None:
        return module
    obj = getattr(module, obj_name)
    return obj


if __name__ == "__main__":
    # Local import: only the CLI entry point needs PyYAML, so importing this
    # module (e.g. for import_from_path) does not require it.
    import yaml

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-c", "--config", help="File path to the config file")
    parser.add_argument("-o", "--output", help="Path to the output file")
    args = parser.parse_args()

    with open(args.config) as config_file:
        config = yaml.safe_load(config_file)

    if args.output is not None:
        output = True
        out_csv = args.output
        dfs = []
    else:
        output = False

    # Importing pipeline elements.
    ds_splitter = import_from_path(config["split"]["filepath"], config["split"]["class"])(**config["split"]["parameters"])
    preprocess = import_from_path(config["preprocess"]["filepath"])
    model_params = config["model"]["parameters"]
    if "kernel" in model_params:
        # A kernel entry is itself a (filepath, class, parameters) spec;
        # bind its parameters into a two-argument callable for the model.
        kernel_func = import_from_path(model_params["kernel"]["filepath"], model_params["kernel"]["class"])
        kernel_params = model_params["kernel"]["parameters"]
        model_params["kernel"] = lambda X, Y: kernel_func(X, Y, **kernel_params)
    model = import_from_path(config["model"]["filepath"], config["model"]["class"])(**config["model"]["parameters"])
    evaluation = import_from_path(config["evaluation"]["filepath"])

    # Evaluation output directory.
    out_dir = 'submissions'
    if output and not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    # Lists filling information for the output dataframe.
    datasets = []
    metrics = []
    values = []

    # Applying pipeline: iterate over datasets.
    for i, dataset in enumerate(config["datasets"]):
        time_beg = time.time()
        print("Working on dataset ", i)

        # Read dataset.
        X = pd.read_csv(dataset["X"]["filepath"], **dataset["X"]["parameters"])
        ## It is currently very important to drop Id before splitting or preprocessing
        y = pd.read_csv(dataset["y"]["filepath"], **dataset["y"]["parameters"]).drop("Id", axis=1)
        if output:
            test = pd.read_csv(dataset["test"]["filepath"], **dataset["test"]["parameters"])

        # Split dataset.
        ds_splitter.generate_idx(y)
        X_train, X_test = ds_splitter.split(X)
        y_train, y_test = ds_splitter.split(y)

        # Preprocess dataset.
        for transform in config["preprocess"]["X"]:
            X_train = getattr(preprocess, transform["transform"])(X_train, **transform["parameters"])
            X_test = getattr(preprocess, transform["transform"])(X_test, **transform["parameters"])
        for transform in config["preprocess"]["y"]:
            y_train = getattr(preprocess, transform["transform"])(y_train, **transform["parameters"])
            y_test = getattr(preprocess, transform["transform"])(y_test, **transform["parameters"])
        if output:
            for transform in config["preprocess"]["X"]:
                test = getattr(preprocess, transform["transform"])(test, **transform["parameters"])

        # Fit model.
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        if output:
            # Map predictions from {-1, 1} to {0, 1} for submission.
            y_pred_test = model.predict(test)
            y_pred_test = (y_pred_test + 1) / 2
            id = np.arange(1000 * i, 1000 * (i + 1))
            dic = {'Id': id, 'Bound': y_pred_test}
            df = pd.DataFrame(data=dic)
            dfs.append(df)

        # Evaluate model.
        for metric in config["evaluation"]["metrics"]:
            datasets.append(dataset["name"])
            metrics.append(metric)
            values.append(getattr(evaluation, metric)(y_pred, y_test))
        print("Done ! In {} s".format(time.time() - time_beg))

    if output:
        df = pd.concat(dfs).astype('int32')
        df.to_csv(os.path.join(out_dir, out_csv), index=False)

    results = {"datasets": datasets, "metrics": metrics, "values": values}
    print(pd.DataFrame.from_dict(results))
4,757
1,384
import pygame
from pygame.sprite import Group
from button import Button
from game_stats import GameStats
from settings import Settings
from ship import Ship
from scoreboard import Scoreboard
import game_funcitons as gf


def run_game():
    """Initialise pygame, build all game objects, then run the main loop
    (event handling, updates while the game is active, and rendering)."""
    pygame.init()
    pygame.mixer.init()
    ai_settings = Settings()
    screen = pygame.display.set_mode(
        (ai_settings.screen_width, ai_settings.screen_height))
    pygame.display.set_caption("Alien Invasion")
    play_button = Button(ai_settings, screen, 'PLAY')
    stats = GameStats(ai_settings)
    scoreboard = Scoreboard(ai_settings, screen, stats)

    # Two copies of the background image are scrolled to fake an endless map.
    bg_img1 = pygame.image.load("images/map.jpg").convert()
    bg_img2 = bg_img1.copy()
    pos_y1 = -1024  # assumes the background image is 1024 px tall — TODO confirm
    pos_y2 = 0

    ship = Ship(ai_settings, screen)
    aliens = Group()
    bullets = Group()
    alien_bullets = Group()
    gf.create_fleet(ai_settings, screen, aliens, alien_bullets)

    # Background music.
    gf.play_music('bgm')
    clock = pygame.time.Clock()

    while True:
        # Handle keyboard/mouse events.
        gf.check_events(ai_settings, screen, stats, scoreboard, play_button,
                        ship, aliens, bullets, alien_bullets)
        gf.update_bullets(ai_settings, screen, stats, scoreboard, aliens,
                          bullets, alien_bullets)
        time_passed = clock.tick()
        if stats.game_active:
            stats.increase_time(time_passed)
            # Ship / bullet updates.
            ship.update()
            # Enemy positions.
            gf.update_aliens(ai_settings, stats, scoreboard, screen, ship,
                             aliens, bullets, alien_bullets, time_passed)

        # Scroll the background: draw both copies, advance them, and wrap
        # each one back above the screen once it scrolls past.
        screen.blit(bg_img1, (0, pos_y1))
        screen.blit(bg_img2, (0, pos_y2))
        pos_y1 += ai_settings.bg_roll_speed_factor
        pos_y2 += ai_settings.bg_roll_speed_factor
        if pos_y1 > 0:
            pos_y1 = -1024
        if pos_y2 > 1024:
            pos_y2 = 0

        gf.update_screen(ai_settings, screen, stats, scoreboard, ship, aliens,
                         bullets, alien_bullets, play_button, time_passed)


run_game()
1,970
722
import os
from contextlib import contextmanager


class LocalFS:
    """Filesystem accessor that resolves paths on the local machine."""

    @contextmanager
    def get(self, src):
        """Yield the canonical absolute path for ``src``.

        ``~`` is expanded to the user's home directory and symlinks are
        resolved before the path is handed to the caller.
        """
        expanded = os.path.expanduser(src)
        yield os.path.realpath(expanded)
185
56
import os
from pyfakefs import fake_filesystem_unittest
from devbox.utilities.manifest_parser import ManifestParser


class TestManifestParser(fake_filesystem_unittest.TestCase):
    """Tests for ManifestParser against TOSCA manifests written to a fake
    in-memory filesystem (no real files are created)."""

    def setUp(self):
        # Replace the real filesystem with pyfakefs for every test.
        # NOTE(review): self.fs.CreateFile is the old pyfakefs API name
        # (newer releases use create_file) — confirm the pinned version.
        self.setUpPyfakefs()

    def test_manifest_parser(self):
        """Node-type defaults are merged into node properties, and optional
        properties without defaults are absent."""
        # Arrange
        self.fs.CreateFile('my-app/devbox.yaml', contents="""
tosca_definitions_version: tosca_simple_yaml_1_0

topology_template:
  node_templates:
    python_server1:
      type: tosca.nodes.Python
      properties:
        ports_bindings:
          type: string
          default: "{1234:80}"
      artifacts:
        binaries:
          file: binaries.zip
    python_client1:
      type: tosca.nodes.Python

node_types:
  tosca.nodes.Python:
    derived_from: tosca.nodes.SoftwareComponent
    properties:
      deployment_image:
        type: string
        default: rastasheep/ubuntu-sshd
      deployment_command:
        type: string
        default: /bin/sh
      deployment_ports:
        type: list
        default: [22, 1234]
      ports_bindings:
        type: string
        required: false
      provisioning_instruction:
        type: string
        default: playbook.yaml
""")
        # Act
        nodes = ManifestParser().parse('my-app/devbox.yaml')
        # Assert: defaults resolved on the server node; the optional
        # ports_bindings is missing from the client node.
        self.assertEqual(nodes[0].properties['deployment_ports'], [22, 1234])
        self.assertEqual(nodes[0].properties['ports_bindings'], "{1234:80}")
        self.assertTrue('ports_bindings' not in nodes[1].properties)

    def test_manifest_parser_deployment_path(self):
        """Artifact paths and per-node property overrides survive parsing."""
        # Arrange
        self.fs.CreateFile('my-app/devbox.yaml', contents="""
tosca_definitions_version: tosca_simple_yaml_1_0

topology_template:
  node_templates:
    python_server1:
      type: tosca.nodes.Python
      properties:
        ports_bindings:
          type: string
          default: "{1234:80}"
        execution_command:
          type: string
          default: "abcd"
      artifacts:
        binaries:
          artifacts_path: /home/user/myappfolder
          deploy_path: mybin
    python_client1:
      type: tosca.nodes.Python

node_types:
  tosca.nodes.Python:
    derived_from: tosca.nodes.SoftwareComponent
    properties:
      deployment_image:
        type: string
        default: rastasheep/ubuntu-sshd
      deployment_command:
        type: string
        default: /bin/sh
      deployment_ports:
        type: list
        default: [22, 1234]
      ports_bindings:
        type: string
        required: false
      provisioning_instruction:
        type: string
        default: playbook.yaml
      execution_command:
        type: string
        default: ""
""")
        # Act
        nodes = ManifestParser().parse('my-app/devbox.yaml')
        # Assert: node-level override ("abcd") beats the type default ("")
        # and artifact path fields are preserved verbatim.
        self.assertEqual(nodes[0].properties['deployment_ports'], [22, 1234])
        self.assertEqual(nodes[0].properties['ports_bindings'], "{1234:80}")
        self.assertEqual(nodes[0].artifacts['binaries']['deploy_path'], "mybin")
        self.assertEqual(nodes[0].artifacts['binaries']['artifacts_path'], "/home/user/myappfolder")
        self.assertEqual(nodes[0].properties['execution_command'], "abcd")
        self.assertEqual(nodes[1].properties['execution_command'], "")
        self.assertTrue('ports_bindings' not in nodes[1].properties)
3,542
1,027
from enum import Enum, auto
from string import Template


class MessageRegister:
    """Maps a message reference (any hashable, here MessageBank members) to
    the callable that renders that message."""

    def __init__(self, dispatch_table=None):
        self.dispatch = dict() if dispatch_table is None else dispatch_table

    def register(self, ref):
        """Decorator: register the wrapped function under ``ref``."""
        def decorator(func):
            self.dispatch[ref] = func
            return func
        return decorator

    def get(self, ref):
        """Return the renderer for ``ref``, or None if unregistered."""
        return self.dispatch.get(ref)

    def __getitem__(self, ref):
        # Raises KeyError for unregistered refs, mirroring dict semantics.
        return self.dispatch[ref]


# Global registry used by MessageData below.
messages = MessageRegister()


class MessageBank(Enum):
    """Stable identifiers for every bot reply."""
    code_block_needed = auto()
    inline_code_misuse = auto()


class MessageStorage:
    """Raw Markdown bodies for the bot's replies."""

    code_block = '''Looks like your Batch file code isn’t wrapped in a code block.

To format code correctly on **new.reddit.com**, highlight the code and select *‘Code Block’* in the editing toolbar.

If you’re on **old.reddit.com**, separate the code from your text with a blank line and precede each line of code with **4 spaces** or a **tab**.

---

^(*Beep-boop. I am a bot.*)
'''

    code_block_with_example = '''Looks like your Batch file code isn’t wrapped in a code block.

To format code correctly on **new.reddit.com**, highlight the code and select *‘Code Block’* in the editing toolbar.

If you’re on **old.reddit.com**, separate the code from your text with a blank line and precede each line of code with **4 spaces** or a **tab**.

E.g.,

    This is normal text.

    @echo off
    echo This is code!

> This is normal text.
>
> @echo off
> echo This is code!

---

^(*Beep-boop. I am a bot.*)
'''

    # BUGFIX: user-facing typo "code bock" -> "code block".
    inline_code = '''Looks like you used *inline code* formatting where a **code block** should have been used.

The inline code text styling is for use in paragraphs of text. For larger sequences of code, consider using a code block. This can be done by selecting your code then clicking the *‘Code Block’* button.

---

^(*Beep-boop. I am a bot.*)
'''


class MessageData:
    """Renderers wired into the global ``messages`` registry."""

    @messages.register(MessageBank.code_block_needed)
    def code_block_needed(example=False, **kws):
        # With example=True, include the worked before/after example.
        return MessageStorage.code_block_with_example if example else MessageStorage.code_block

    @messages.register(MessageBank.inline_code_misuse)
    def inline_code_misuse(**kws):
        return MessageStorage.inline_code
2,140
693
from django.apps import AppConfig


class ClxQueryConfig(AppConfig):
    """Django application configuration for the ``clxquery`` app."""

    # Dotted path of the application, used by Django's app registry.
    name = "clxquery"
91
30
import os
import shutil
import pychemia
import tempfile
import unittest


class MyTestCase(unittest.TestCase):
    """Tests for pychemia.code.vasp input/output handling.

    All tests read fixture files under ``tests/data`` and therefore must be
    run from the repository root."""

    def test_incar(self):
        """
        Test (pychemia.code.vasp) [INCAR parsing and writing]  :
        """
        print(os.getcwd())
        # Parse a known INCAR file and check a couple of its values.
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_01/INCAR')
        self.assertEqual(len(iv), 12)
        self.assertEqual(iv.EDIFF, 1E-7)
        # Round-trip: writing the parsed input must succeed.
        wf = tempfile.NamedTemporaryFile()
        iv.write(wf.name)
        wf.close()
        # Reading the directory should find the same INCAR.
        iv4dir = pychemia.code.vasp.read_incar('tests/data/vasp_01')
        self.assertEqual(iv, iv4dir)
        # A directory without an INCAR raises ValueError.
        self.assertRaises(ValueError, pychemia.code.vasp.read_incar, 'tests/data')
        # Construction from a variables dict.
        iv3 = pychemia.code.vasp.VaspInput(variables={'EDIFF': 1E-6})
        self.assertEqual(iv3['EDIFF'], 1E-6)
        # Modify a value and write the input into a scratch directory.
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_02')
        iv.EDIFF *= 1.3
        td = tempfile.mkdtemp()
        pychemia.code.vasp.write_incar(iv, td)
        # Writing an unknown key ('EDIF' typo) must be rejected.
        self.assertRaises(ValueError, iv.write_key, 'EDIF')
        shutil.rmtree(td)

    def test_bad_outcar(self):
        """
        Test (pychemia.code.vasp) [corrupted VASP OUTCAR]  :
        """
        # Parsing a corrupted OUTCAR should still report a finished run.
        vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_04/OUTCAR')
        self.assertTrue(vo.is_finished)

    def test_encut_setup(self):
        """
        Test (pychemia.code.vasp) [ENCUT setup]  :
        """
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
        # ENCUT=1.2 is a multiplier over the POTCAR's maximum ENMAX.
        iv.set_encut(ENCUT=1.2, POTCAR='tests/data/vasp_06/POTCAR')
        self.assertEqual(iv.ENCUT, 307)
        iv.set_rough_relaxation()
        self.assertEqual(iv.EDIFFG, -1E-2)
        iv.set_mit_settings()

    def test_vaspjob(self):
        """
        Test (pychemia.code.vasp) [VaspJob]  :
        """
        td = tempfile.mkdtemp()
        # Assemble a complete job from fixture structure/kpoints/incar and
        # write all its input files into a scratch directory.
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
        kp = pychemia.code.vasp.read_kpoints('tests/data/vasp_06')
        self.assertEqual(kp.number_of_kpoints, 693)
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
        vj = pychemia.code.vasp.VaspJob(workdir=td,)
        vj.initialize(st, kpoints=kp)
        vj.set_input_variables(iv)
        vj.write_poscar()
        vj.write_kpoints()
        vj.write_incar()
        shutil.rmtree(td)

    def test_outcar(self):
        """
        Test (pychemia.code.vasp) [outcar]  :
        """
        vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_06/OUTCAR')
        self.assertEqual(vo.get_memory_used()['grid'], (1028.0, 'kBytes'))
        self.assertAlmostEqual(vo.to_dict['energy'], -19.67192646)
        print(vo)
        self.assertTrue(vo.has_forces_stress_energy())

    def test_poscar(self):
        """
        Test (pychemia.code.vasp) [poscar]  :
        """
        # Temporal directory for outputs
        tmpdir = tempfile.mkdtemp()

        # Read a POSCAR by directory
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
        self.assertEqual(st.natom, 4)

        # Opening old format POSCAR without POTCAR must fail (species
        # cannot be determined).
        with self.assertRaises(ValueError) as context:
            st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR')

        # Old and new POSCAR formats of the same 2-atom cell.
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_old')
        self.assertEqual(st.natom, 2)
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_new')
        self.assertEqual(st.natom, 2)

        # POTCAR writing fails for a missing or invalid pseudopotential base.
        with self.assertRaises(ValueError) as context:
            pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='/no/existing/path')
        with self.assertRaises(ValueError) as context:
            pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')

        # Reading with a relative path from inside the data directory.
        cwd = os.getcwd()
        os.chdir('tests/data/vasp_07')
        st = pychemia.code.vasp.read_poscar('POSCAR_new')
        os.chdir(cwd)
        self.assertEqual(st.natom, 44)

        # Write the same structure in three variants (direct/cartesian and
        # old format) and verify each round-trips to the same cell.
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR_alt')
        pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR1')
        pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR2', direct=False)
        pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR3', newformat=False)

        st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR1')
        self.assertAlmostEqual(st.volume, 584.47161926043907)
        sym = pychemia.crystal.CrystalSymmetry(st)
        self.assertEqual(sym.symbol(), 'C2/c')

        st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR2')
        self.assertAlmostEqual(st.volume, 584.47161926043907)
        sym = pychemia.crystal.CrystalSymmetry(st)
        self.assertEqual(sym.symbol(), 'C2/c')

        st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR3')
        self.assertAlmostEqual(st.volume, 584.47161926043907)
        sym = pychemia.crystal.CrystalSymmetry(st)
        self.assertEqual(sym.symbol(), 'C2/c')

        # Write a POTCAR from a valid base and read its metadata back.
        pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')
        pychemia.code.vasp.get_potcar_info(tmpdir + os.sep + 'POTCAR')

        shutil.rmtree(tmpdir)
5,254
2,078
import os


class Material:
    """A material plus the set of object types (DUST, INGOT, ...) that
    should be generated for it."""

    def __init__(self, name, color, outputs):
        self.name = name
        self.color = color      # 0xRRGGBB tint applied to greyscale template icons
        self.outputs = outputs  # tuple of object-type names to generate


# NOTE(review): this first list is immediately overwritten by the second
# assignment below and is therefore dead; kept for reference when re-running
# generation for the original material set.
materials = [Material("dilithium", 0xddcecb, ("DUST", "GEM")),
             Material("iron", 0xafafaf, ("SHEET", "STICK", "DUST", "PLATE")),
             Material("gold", 0xffff5d, ("DUST", "COIL", "PLATE")),
             Material("silicon", 0x2c2c2b, ("INGOT", "DUST", "BOULE", "NUGGET", "PLATE")),
             Material("copper", 0xd55e28, ("ORE", "COIL", "BLOCK", "STICK", "INGOT", "NUGGET", "DUST", "PLATE", "SHEET")),
             Material("tin", 0xcdd5d8, ("ORE", "BLOCK", "PLATE", "INGOT", "NUGGET", "DUST")),
             Material("steel", 0x55555d, ("BLOCK", "FAN", "PLATE", "INGOT", "NUGGET", "DUST", "STICK", "GEAR", "SHEET")),
             Material("titanium", 0xb2669e, ("PLATE", "COIL", "INGOT", "NUGGET", "DUST", "STICK", "BLOCK", "GEAR", "SHEET")),
             Material("rutile", 0xbf936a, ("ORE",)),
             Material("aluminum", 0xb3e4dc, ("ORE", "COIL", "BLOCK", "INGOT", "PLATE", "SHEET", "DUST", "NUGGET", "SHEET")),
             Material("iridium", 0xdedcce, ("ORE", "COIL", "BLOCK", "DUST", "INGOT", "NUGGET", "PLATE", "STICK"))]

materials = [Material("dilithium", 0xddcecb, ("DUST", "GEM")),
             Material("titaniumaluminide", 0xaec2de, ("GEAR", "COIL", "BLOCK", "INGOT", "PLATE", "SHEET", "DUST", "NUGGET", "SHEET")),
             Material("titaniumiridium", 0xd7dfe4, ("GEAR", "COIL", "BLOCK", "INGOT", "PLATE", "SHEET", "DUST", "NUGGET", "SHEET"))]

blockTypes = ['COIL', 'BLOCK', 'ORE']  # types that are blocks rather than items
coilTypes = ['COIL']                   # blocks that use the tinted-column model
noIconGenTypes = ['ORE']               # types whose icons are hand-drawn, not tinted

# JSON templates; @TYPE@ and @MATERIAL@ are substituted per material.
itemSample = '{\n  "parent": "item/generated",\n  "textures": {\n    "layer0": "libvulpes:items/@TYPE@@MATERIAL@"\n  }\n}'
blockItemSample = '{\n  "parent": "libvulpes:block/@TYPE@@MATERIAL@"\n}'
blockSample = '{\n  "parent": "minecraft:block/cube_all",\n  "textures": {\n    "all": "libvulpes:blocks/@TYPE@@MATERIAL@"\n  }\n}'
coilSample = '{\n  "parent": "libvulpes:block/tintedcubecolumn",\n  "textures": {\n    "end": "libvulpes:blocks/@TYPE@@MATERIAL@top",\n    "side": "libvulpes:blocks/@TYPE@@MATERIAL@side"\n  }\n}'
blockStateSample = '{\n  "variants": {\n    "": { "model": "libvulpes:block/@TYPE@@MATERIAL@" }\n  }\n}'

itemDir = 'src/main/resources/assets/libvulpes/models/item/'
blockDir = 'src/main/resources/assets/libvulpes/models/block/'
blockStateDir = 'src/main/resources/assets/libvulpes/blockstates/'
itemIconDir = 'src/main/resources/assets/libvulpes/textures/items/'
blockIconDir = 'src/main/resources/assets/libvulpes/textures/blocks/'
blockTagPath = "src/main/resources/data/forge/tags/blocks/"
itemTagPath = "src/main/resources/data/forge/tags/items/"
blockTagSample = '{\n  "replace": false,\n  "values": [@BLOCKLIST@]\n}'


def getMatrix(color):
    """Return an ImageMagick color-matrix string scaling RGB by ``color``."""
    r = ((color >> 16) & 0xff) / 0xff
    g = ((color >> 8) & 0xff) / 0xff
    b = (color & 0xff) / 0xff
    return str(r) + ' 0 0 0 ' + str(g) + ' 0 0 0 ' + str(b)


def getCommand(inputFile, outputFile, color):
    """Build the ImageMagick ``convert`` command tinting one icon."""
    return 'convert ' + inputFile + ' -color-matrix "' + getMatrix(color) + '" ' + outputFile


def genItem(mat, objType):
    """Write the item model JSON for (mat, objType) and tint its icon."""
    if objType not in blockTypes:
        output = itemSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
    else:
        # Block types get an item model that just points at the block model.
        output = blockItemSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
    filename = itemDir + objType.lower() + mat.name + '.json'
    # FIX: use a context manager so the file handle is closed (it previously
    # leaked until interpreter exit).
    with open(filename, 'w') as f:
        f.write(output)
    # Generate the icon now (items only; block icons are made in genBlock).
    if objType not in blockTypes:
        inputFile = itemIconDir + objType.lower() + '.png'
        outputFile = itemIconDir + objType.lower() + mat.name + '.png'
        cmd = getCommand(inputFile, outputFile, mat.color)
        os.system(cmd)


def genBlock(mat, objType):
    """Write the block model, blockstate, and tinted icons for (mat, objType)."""
    if objType in coilTypes:
        output = coilSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
    else:
        output = blockSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
    filename = blockDir + objType.lower() + mat.name + '.json'
    # FIX: close file handles via context managers (previously leaked).
    with open(filename, 'w') as f:
        f.write(output)

    # Generate the blockstate.
    output = blockStateSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
    filename = blockStateDir + objType.lower() + mat.name + '.json'
    with open(filename, 'w') as f:
        f.write(output)

    # Generate the icon now (skipping hand-drawn types such as ores).
    if objType not in noIconGenTypes:
        if objType in coilTypes:
            # Coils have separate top ("pole") and side textures.
            inputFile = blockIconDir + objType.lower() + 'pole.png'
            outputFile = blockIconDir + objType.lower() + mat.name + 'top.png'
            cmd = getCommand(inputFile, outputFile, mat.color)
            os.system(cmd)
            inputFile = blockIconDir + objType.lower() + 'side.png'
            outputFile = blockIconDir + objType.lower() + mat.name + 'side.png'
            cmd = getCommand(inputFile, outputFile, mat.color)
            os.system(cmd)
        else:
            inputFile = blockIconDir + objType.lower() + '.png'
            outputFile = blockIconDir + objType.lower() + mat.name + '.png'
            cmd = getCommand(inputFile, outputFile, mat.color)
            os.system(cmd)


def printEnLang(mat, objType, block):
    """Print the en_us lang entry for (mat, objType) to stdout."""
    human_mat = mat.name
    human_type = objType.lower()
    human_mat = human_mat[0].upper() + human_mat[1:]
    human_type = human_type[0].upper() + human_type[1:]
    if block:
        print('  "block.libvulpes.{}{}": "{} {}",'.format(objType.lower(), mat.name, human_mat, human_type))
    else:
        print('  "item.libvulpes.{}{}": "{} {}",'.format(objType.lower(), mat.name, human_mat, human_type))


def generateTag(tagPath, mat, objType):
    """Write a per-material forge tag file under ``tagPath``/type/."""
    if not os.path.exists(tagPath + objType.lower()):
        os.makedirs(tagPath + objType.lower())
    filename = tagPath + objType.lower() + '/' + mat.name + '.json'
    contents = blockTagSample.replace('@BLOCKLIST@', ' "libvulpes:' + objType.lower() + mat.name + '"')
    with open(filename, 'w') as f:
        f.write(contents)


# Per-type generation pass.  Several steps are intentionally disabled
# (commented out) and only tag generation currently runs.
objTypeToMaterialMap = {}
for mat in materials:
    for objType in mat.outputs:
        if objType not in objTypeToMaterialMap:
            objTypeToMaterialMap[objType] = []
        #objTypeToMaterialMap[objType].append(mat)
        #genItem(mat, objType)
        if objType in blockTypes:
            # genBlock(mat, objType)
            generateTag(blockTagPath, mat, objType)
        generateTag(itemTagPath, mat, objType)
        #printEnLang(mat, objType, objType in blockTypes)

# Combined per-type tag files listing every material of that type.
for objType in objTypeToMaterialMap.keys():
    contentString = []
    for mat in objTypeToMaterialMap[objType]:
        contentString.append(' "libvulpes:' + objType.lower() + mat.name + '"')
    contents = blockTagSample.replace('@BLOCKLIST@', ',\n'.join(contentString))
    try:
        if objType in blockTypes:
            tag_file = blockTagPath + objType.lower() + '.json'
        else:
            tag_file = itemTagPath + objType.lower() + '.json'
        # FIX: context manager guarantees the handle is closed even if the
        # write fails; the FileNotFoundError fallback is preserved.
        with open(tag_file, 'w') as f:
            f.write(contents)
    except FileNotFoundError:
        pass
7,048
2,597
# # # 0=================================0 # | Kernel Point Convolutions | # 0=================================0 # # # ---------------------------------------------------------------------------------------------------------------------- # # Class handling SemanticKitti dataset. # Implements a Dataset, a Sampler, and a collate_fn # # ---------------------------------------------------------------------------------------------------------------------- # # Hugues THOMAS - 11/06/2018 # # ---------------------------------------------------------------------------------------------------------------------- # # Imports and global variables # \**********************************/ # # Common libs import sys import struct import scipy import time import numpy as np import pickle import torch import yaml #from mayavi import mlab from multiprocessing import Lock import open3d from scipy.spatial.transform import Rotation as scipyR from scipy.spatial.transform import Slerp import matplotlib.pyplot as plt # OS functions from os import listdir from os.path import exists, join, isdir, getsize # Dataset parent class from utils.mayavi_visu import * from sklearn.neighbors import KDTree from slam.cpp_slam import polar_normals, bundle_pt2pl_icp from datasets.common import grid_subsampling import open3d as o3d import copy import re from utils.mayavi_visu import show_point_cloud def compute_plane(points): ref_point = points[0] normal = np.cross(points[1] - points[0], points[2] - points[0]) normal = normal / np.sqrt(np.sum(np.power(normal, 2))) return ref_point, normal def in_plane(points, ref_pt, normal, threshold_in=0.1): return np.abs(np.dot((points - ref_pt), normal)) < threshold_in def RANSAC(points, NB_RANDOM_DRAWS=100, threshold_in=0.1): best_mask = None best_vote = 3 best_ref_pt, best_normal = compute_plane(points[:3]) N = len(points) for i in range(NB_RANDOM_DRAWS): # Random selection of points random_inds = np.zeros((0,), dtype=np.int32) while random_inds.shape[0] < 3: new_inds = 
np.random.randint(0, N, size=3, dtype=np.int32) random_inds = np.unique(np.hstack((random_inds, new_inds))) random_inds = random_inds[:3] # Corresponding plane ref_pt, normal = compute_plane(points[random_inds]) # Number of votes mask = in_plane(points, ref_pt, normal, threshold_in) vote = np.sum(mask) # Save if vote > best_vote: best_ref_pt = ref_pt best_normal = normal best_vote = vote best_mask = mask return best_ref_pt, best_normal, best_mask def extract_ground(points, normals, out_folder, vertical_thresh=10.0, dist_thresh=0.15, remove_dist=0.15, saving=True): # Get points with vertical normal vertical_angle = np.arccos(np.abs(np.clip(normals[:, 2], -1.0, 1.0))) # Use the thresold on the vertical angle in degree plane_mask = vertical_angle < vertical_thresh * np.pi / 180 # Get the ground plane with RANSAC plane_P, plane_N, _ = RANSAC(points[plane_mask], threshold_in=dist_thresh) # Get mask on all the points plane_mask = in_plane(points, plane_P, plane_N, dist_thresh) mask0 = np.copy(plane_mask) # Get better ground/objects boundary candidates = points[plane_mask] others = points[np.logical_not(plane_mask)] dists, inds = KDTree(others).query(candidates, 1) plane_mask[plane_mask] = np.squeeze(dists) > remove_dist if saving: ground_points = points[plane_mask] ground_normals = normals[plane_mask] write_ply(join(out_folder, 'ground.ply'), [ground_points, ground_normals], ['x', 'y', 'z', 'nx', 'ny', 'nz']) return plane_mask def read_pgm(filename, byteorder='>'): """Return image data from a raw PGM file as numpy array. 
def write_pgm(filename, image):
    """Write a 2D uint8 array as a binary (P5) PGM file with maxval 255.

    :param filename: output path (truncated, then appended to).
    :param image: [height, width] array; written raw with ndarray.tofile.
    """
    # open in text mode to write the header
    with open(filename, 'w') as pgm_file:
        # First magical word
        header = ['P5']
        header.append('{:d} {:d}'.format(image.shape[1], image.shape[0]))
        header.append('255')
        for line in header:
            pgm_file.write("%s\n" % line)

    # open in binary/append to use tofile
    with open(filename, 'ab') as pgm_file:
        image.tofile(pgm_file)


def pointmap_for_AMCL():
    """One-shot script: rasterize a SLAM point map into 2D occupancy PGM/YAML maps
    (V4 at the original resolution/origin, V5 at 0.05 m over a fixed 44x44 m extent)
    for use by AMCL. Uses hard-coded Myhal simulation paths; shows matplotlib
    comparisons along the way.
    """

    # -----------------------------------------------------------------------------------------
    # Load original map for comparison

    path = '../../Myhal_Simulation/Simulator/JackalTourGuide/src/jackal_velodyne/maps'
    pgm_file = 'myhal_map_V3.pgm'
    yml_file = 'myhal_map_V3.yaml'

    with open(join(path, yml_file), 'r') as stream:
        doc = yaml.safe_load(stream)
    print('-----------------------------')
    print('image:', doc['image'])
    print('resolution:', doc['resolution'])
    print('origin:', doc['origin'])
    print('negate:', doc['negate'])
    print('occupied_thresh:', doc['occupied_thresh'])
    print('free_thresh:', doc['free_thresh'])
    print('-----------------------------')

    image = read_pgm(join(path, pgm_file), byteorder='<')
    # plt.imshow(image)
    # plt.show()

    # -----------------------------------------------------------------------------------------
    # Load point map

    path = '../../Myhal_Simulation/slam_offline/2020-10-02-13-39-05'
    ply_file = 'map_update_0002.ply'
    data = read_ply(join(path, ply_file))
    points = np.vstack((data['x'], data['y'], data['z'])).T

    # Estimate the ground height from the lowest 9 cm band of the cloud
    heights = points[:, 2]
    min_z = np.min(heights)
    heights = heights[heights < min_z + 0.09]
    ground_z = np.median(heights)

    # Keep only points in a slab 0.3 m to 1.0 m above the ground (obstacles)
    z1 = ground_z + 0.3
    z2 = ground_z + 1.0
    mask_2D = np.logical_and(points[:, 2] < z2, points[:, 2] > z1)
    points_2D = points[mask_2D, :2]

    # -----------------------------------------------------------------------------------------
    # Fill map_image

    origin_2D = np.array(doc['origin'][:2], dtype=np.float32)

    # Compute voxel indice for each frame point
    grid_indices = (np.floor((points_2D - origin_2D) / doc['resolution'])).astype(int)

    # Flip first axis it is an image
    grid_indices[:, 1] = image.shape[0] - grid_indices[:, 1]

    # Scalar equivalent to grid indices
    # NOTE(review): for a row-major (H, W) image, flattening usually needs
    # x + y * W = image.shape[1]; shape[0] is used here — correct only for a
    # square map. Same question for the reshape to (image_w, image_h) below. Verify.
    scalar_indices = grid_indices[:, 0] + grid_indices[:, 1] * image.shape[0]

    # Mark occupied cells (0) on a white (255) canvas
    vec_img = np.reshape(image * 0 + 255, (-1,))
    vec_img[np.unique(scalar_indices)] = 0
    image2 = np.reshape(vec_img, image.shape)

    f, axarr = plt.subplots(1, 2)
    axarr[0].imshow(image)
    axarr[1].imshow(image2)
    plt.show()

    # -----------------------------------------------------------------------------------------
    # Save and check saved

    path = '../../Myhal_Simulation/Simulator/JackalTourGuide/src/jackal_velodyne/maps'
    pgm_file = 'myhal_map_V4.pgm'
    yml_file = 'myhal_map_V4.yaml'
    doc['image'] = pgm_file

    # `if False and ...`: re-check branch deliberately disabled; always saves
    if False and exists(join(path, pgm_file)):
        imagetest = read_pgm(join(path, pgm_file), byteorder='<')
        plt.imshow(imagetest)
        plt.show()
    else:
        with open(join(path, yml_file), 'w') as outfile:
            yaml.dump(doc, outfile)
        write_pgm(join(path, pgm_file), image2)

    # -----------------------------------------------------------------------------------------
    # change map parameters: finer resolution over a fixed [-22, 22] m extent

    doc['image'] = pgm_file
    doc['resolution'] = 0.05
    doc['origin'] = [-22, -22, 0]
    limit_2D = np.array([22, 22], dtype=np.float32)
    origin_2D = np.array(doc['origin'][:2], dtype=np.float32)
    image_w, image_h = (np.ceil((limit_2D - origin_2D) / doc['resolution'])).astype(int)

    # Compute voxel indice for each frame point
    grid_indices = (np.floor((points_2D - origin_2D) / doc['resolution'])).astype(int)

    # Flip first axis it is an image
    grid_indices[:, 1] = image_h - grid_indices[:, 1]

    # Scalar equivalent to grid indices
    scalar_indices = grid_indices[:, 0] + grid_indices[:, 1] * image_w

    vec_img = np.zeros(image_w * image_h, dtype='u1') + 255
    vec_img[np.unique(scalar_indices)] = 0
    image3 = np.reshape(vec_img, (image_w, image_h))

    f, axarr = plt.subplots(1, 2)
    axarr[0].imshow(image2)
    axarr[1].imshow(image3)
    plt.show()

    path = '../../Myhal_Simulation/Simulator/JackalTourGuide/src/jackal_velodyne/maps'
    pgm_file = 'myhal_map_V5.pgm'
    yml_file = 'myhal_map_V5.yaml'
    doc['image'] = pgm_file

    if False and exists(join(path, pgm_file)):
        imagetest = read_pgm(join(path, pgm_file), byteorder='<')
        plt.imshow(imagetest)
        plt.show()
    else:
        with open(join(path, yml_file), 'w') as outfile:
            yaml.dump(doc, outfile)
        write_pgm(join(path, pgm_file), image3)

    return


def normals_orientation(normals):
    """Histogram unit normals on a voxelized sphere, smooth the histogram with a
    Gaussian 3D convolution, and compute the principal axes of the dominant
    normal directions.

    :param normals: [N, 3] array of (approximately unit) normals.
    :return: (n_cloud, smooth_counts, eigen_vectors) where n_cloud are the
        occupied bin centers, smooth_counts their smoothed counts, and
        eigen_vectors the eigenvectors (ascending eigenvalue order, np.linalg.eigh)
        of the covariance of the top-20% bins.
    """

    # Discretise the sphere in carthesian coordiantes to avoid the resolution problem at poles
    voxel_size = 0.05

    # Compute voxel indice for each point
    grid_indices = (np.floor(normals / voxel_size)).astype(int)

    # Limits of the grid
    min_grid_indices = np.amin(grid_indices, axis=0)
    max_grid_indices = np.amax(grid_indices, axis=0)

    # Number of cells in each direction
    deltaX, deltaY, deltaZ = max_grid_indices - min_grid_indices + 1

    # Relocate indices
    grid_indices -= min_grid_indices

    # Scalar equivalent to grid indices
    scalar_indices = grid_indices[:, 0] + grid_indices[:, 1] * deltaX + grid_indices[:, 2] * deltaX * deltaY

    # np.unique with both flags returns (unique, inverse, counts) in this order
    unique_inds, inverse, counts = np.unique(scalar_indices, return_counts=True, return_inverse=True)

    # Get counts in a 3D matrix (decompose scalar index back into x, y, z)
    unique_z = unique_inds // (deltaX * deltaY)
    unique_inds -= unique_z * deltaX * deltaY
    unique_y = unique_inds // deltaX
    unique_x = unique_inds - unique_y * deltaX
    count_matrix = np.zeros((deltaX, deltaY, deltaZ), dtype=np.float32)
    count_matrix[unique_x, unique_y, unique_z] += counts

    # Smooth them with a gaussian filter convolution (weights frozen, normalized to sum 1)
    # NOTE: gaussian_conv_filter is defined later in this module; resolved at call time.
    torch_conv = torch.nn.Conv3d(1, 1, kernel_size=5, stride=1, bias=False)
    torch_conv.weight.requires_grad_(False)
    torch_conv.weight *= 0
    torch_conv.weight += gaussian_conv_filter(3, 5)
    torch_conv.weight *= torch.sum(torch_conv.weight) ** -1
    count_matrix = np.expand_dims(count_matrix, 0)
    count_matrix = np.expand_dims(count_matrix, 0)
    torch_count = torch.from_numpy(count_matrix)
    # Pad by 2 on each side so the 5-kernel convolution preserves the grid size
    torch_count = torch.nn.functional.pad(torch_count, [2, 2, 2, 2, 2, 2])
    smooth_counts = torch.squeeze(torch_conv(torch_count))
    smooth_counts = smooth_counts.numpy()[unique_x, unique_y, unique_z]

    #################################################
    # Create weight according to the normal direction
    #################################################

    # Show histogram in a spherical point cloud (bin centers back in normal space)
    n_cloud = np.vstack((unique_x, unique_y, unique_z)).astype(np.float32).T
    n_cloud = (n_cloud + min_grid_indices.astype(np.float32) + 0.5) * voxel_size

    # Only 20% of the normals bins are kept For the rest, we use weights based on ditances
    mask = (smooth_counts > np.percentile(smooth_counts, 80))

    # Align with weighted PCA
    # weighted_cloud = n_cloud[mask] * np.expand_dims(smooth_counts[mask], 1)
    weighted_cloud = n_cloud[mask]
    # mean_P = np.sum(weighted_cloud, axis=0) / np.sum(smooth_counts)
    # print(mean_P.shape)
    # cloud_0 = n_cloud - mean_P
    # TODO: Covarariance not robust, do a ICP???
cov_mat = np.matmul(weighted_cloud.T, weighted_cloud) / n_cloud[mask].shape[0] #np.sum(smooth_counts[mask]) eigen_values, eigen_vectors = np.linalg.eigh(cov_mat) # Correct eigenvectors orientation with centroid # mean_P = np.sum(weighted_cloud, axis=0) / np.sum(smooth_counts) # rotated_centroids = np.matmul(mean_P, eigen_vectors.T) # corrections = (rotated_centroids > 0).astype(eigen_vectors.dtype) * 2 - 1 return n_cloud, smooth_counts, eigen_vectors def rot_trans_diffs(all_H): all_R = all_H[:, :3, :3] all_R_T = np.transpose(all_R, (0, 2, 1)) dR = np.matmul(all_R[1:], all_R_T[:-1]) dR = np.arccos(np.clip((np.trace(dR, axis1=1, axis2=2) - 1) / 2, -1.0, 1.0)) dT = all_H[1:, :3, 3] - all_H[:-1, :3, 3] dT = np.linalg.norm(dT, axis=1) return dT, dR def interp_pose(t, H0, H1): """ Interpolate pose at time t (between 0 and 1) between the pose H(t=0) and H(t=1) :param t: interpolation time :param H0: first pose :param H1: second pose :return: interpolated pose """ # Create a slerp interpolation function for the rotation part of the transform R1 = H0[:3, :3] R2 = H1[:3, :3] key_rots = scipyR.from_matrix(np.stack((R1, R2), axis=0)) slerp = Slerp([0, 1], key_rots) interp_R = slerp(t).as_matrix() # Create linear interpolation for translation interp_H = (1 - t) * H0 + t * H1 # Update rotation part of the transform interp_H[:3, :3] = interp_R return interp_H def frame_H_to_points(H_f, size=1.0): # Create artificial frames x = np.linspace(0, size, 50, dtype=np.float32) points = np.hstack((np.vstack((x, x * 0, x * 0)), np.vstack((x * 0, x, x * 0)), np.vstack((x * 0, x * 0, x)))).T colors = ((points > 0.1 * size).astype(np.float32) * 255).astype(np.uint8) hpoints = np.hstack((points, np.ones_like(points[:, :1]))) hpoints = np.matmul(hpoints, H_f.T) return hpoints[:, :3], colors def save_trajectory(filename, all_traj_H): # Save full trajectory all_traj_pts = [] all_traj_clrs = [] for save_i, save_H in enumerate(all_traj_H): # Save trajectory traj_pts, traj_clrs = 
frame_H_to_points(save_H, size=0.1) traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * save_i)) all_traj_pts.append(traj_pts.astype(np.float32)) all_traj_clrs.append(traj_clrs) write_ply(filename, [np.vstack(all_traj_pts), np.vstack(all_traj_clrs)], ['x', 'y', 'z', 't', 'red', 'green', 'blue']) def cart2pol(xyz): """ Convertion from 3D carthesian coordinates xyz to 3D polar coordinates rtp :param xyz: [N,3] matrix of x, y, z coordinates :return: [N,3] matrix of rho, theta, phi coordinates """ rho = np.linalg.norm(xyz, axis=1) phi = (3 * np.pi / 2 - np.arctan2(xyz[:, 1], xyz[:, 0])) % (2 * np.pi) theta = np.arctan2(np.linalg.norm(xyz[:, :2], axis=1), xyz[:, 2]) return np.vstack((rho, theta, phi)).T def pol2cart(rtp): """ Convertion from 3D polar coordinates rtp to 3D carthesian coordinates xyz :param rtp: [N,3] matrix of rho, theta, phi coordinates :return: [N,3] matrix of x, y, z coordinates """ x = rtp[:, 0] * np.sin(rtp[:, 1]) * np.cos(rtp[:, 2]) y = rtp[:, 0] * np.sin(rtp[:, 1]) * np.sin(rtp[:, 2]) z = rtp[:, 0] * np.cos(rtp[:, 1]) return np.vstack((x, y, z)).T def ssc_to_homo(ssc, ssc_in_radians=True): # Convert 6-DOF ssc coordinate transformation to 4x4 homogeneous matrix # transformation if ssc.ndim == 1: reduce = True ssc = np.expand_dims(ssc, 0) else: reduce = False if not ssc_in_radians: ssc[:, 3:] = np.pi / 180.0 * ssc[:, 3:] sr = np.sin(ssc[:, 3]) cr = np.cos(ssc[:, 3]) sp = np.sin(ssc[:, 4]) cp = np.cos(ssc[:, 4]) sh = np.sin(ssc[:, 5]) ch = np.cos(ssc[:, 5]) H = np.zeros((ssc.shape[0], 4, 4)) H[:, 0, 0] = ch*cp H[:, 0, 1] = -sh*cr + ch*sp*sr H[:, 0, 2] = sh*sr + ch*sp*cr H[:, 1, 0] = sh*cp H[:, 1, 1] = ch*cr + sh*sp*sr H[:, 1, 2] = -ch*sr + sh*sp*cr H[:, 2, 0] = -sp H[:, 2, 1] = cp*sr H[:, 2, 2] = cp*cr H[:, 0, 3] = ssc[:, 0] H[:, 1, 3] = ssc[:, 1] H[:, 2, 3] = ssc[:, 2] H[:, 3, 3] = 1 if reduce: H = np.squeeze(H) return H def verify_magic(s): magic = 44444 m = struct.unpack('<HHHH', s) return len(m)>=4 and m[0] == magic and m[1] == magic 
def test_read_hits():
    """Debug script: read the first ~0.1% of an NCLT velodyne_hits.bin file and
    dump the decoded hits to a PLY for inspection. Hard-coded day 2012-01-08.

    NOTE(review): output filename 'test_hits' has no .ply extension — presumably
    write_ply appends it; verify.
    """

    data_path = '../../Data/NCLT'
    velo_folder = 'velodyne_data'
    day = '2012-01-08'
    hits_path = join(data_path, velo_folder, day, 'velodyne_hits.bin')

    all_utimes = []
    all_hits = []
    all_ints = []
    num_bytes = getsize(hits_path)
    current_bytes = 0
    with open(hits_path, 'rb') as f_bin:
        total_hits = 0
        first_utime = -1
        last_utime = -1
        while True:
            magic = f_bin.read(8)
            if magic == b'':
                break
            if not verify_magic(magic):
                print('Could not verify magic')

            num_hits = struct.unpack('<I', f_bin.read(4))[0]
            utime = struct.unpack('<Q', f_bin.read(8))[0]

            # Do not convert padding (it is an int always equal to zero)
            padding = f_bin.read(4)

            total_hits += num_hits
            if first_utime == -1:
                first_utime = utime
            last_utime = utime

            hits = []
            ints = []
            # Each hit: 3 x uint16 coords + intensity byte + laser-id byte
            # NOTE(review): the inner `i` rebinds the loop variable — harmless
            # here (range is re-evaluated each iteration) but confusing.
            for i in range(num_hits):
                x = struct.unpack('<H', f_bin.read(2))[0]
                y = struct.unpack('<H', f_bin.read(2))[0]
                z = struct.unpack('<H', f_bin.read(2))[0]
                i = struct.unpack('B', f_bin.read(1))[0]
                l = struct.unpack('B', f_bin.read(1))[0]
                hits += [[x, y, z]]
                ints += [i]

            utimes = np.full((num_hits,), utime - first_utime, dtype=np.int32)
            ints = np.array(ints, dtype=np.uint8)
            hits = np.array(hits, dtype=np.float32)
            # Decode fixed-point coordinates: 5 mm resolution, -100 m offset
            hits *= 0.005
            hits += -100.0
            all_utimes.append(utimes)
            all_hits.append(hits)
            all_ints.append(ints)

            # Early stop after 0.1% of the file (debug only)
            if 100 * current_bytes / num_bytes > 0.1:
                break
            current_bytes += 24 + 8 * num_hits
            print('{:d}/{:d} => {:.1f}%'.format(current_bytes, num_bytes, 100 * current_bytes / num_bytes))

    all_utimes = np.hstack(all_utimes)
    all_hits = np.vstack(all_hits)
    all_ints = np.hstack(all_ints)
    write_ply('test_hits',
              [all_hits, all_ints, all_utimes],
              ['x', 'y', 'z', 'intensity', 'utime'])

    print("Read %d total hits from %ld to %ld" % (total_hits, first_utime, last_utime))

    return 0


def raw_frames_ply():
    """Split NCLT velodyne_hits.bin packet streams into per-revolution frames and
    save each frame as <utime>.ply under Data/NCLT/raw_ply/<day>/.

    A new frame starts whenever the azimuth of the last hit wraps around
    (phi < last_phi).

    NOTE(review): `makedirs` is not among the explicit `os` imports —
    presumably provided by `from utils.mayavi_visu import *`; verify.
    """

    # In files
    data_path = '../../Data/NCLT'
    velo_folder = 'velodyne_data'

    # Out folder
    out_folder = join(data_path, 'raw_ply')
    if not exists(out_folder):
        makedirs(out_folder)

    # Transformation from body to velodyne frame (from NCLT paper)
    x_body_velo = np.array([0.002, -0.004, -0.957, 0.807, 0.166, -90.703])
    H_body_velo = ssc_to_homo(x_body_velo, ssc_in_radians=False)
    H_velo_body = np.linalg.inv(H_body_velo)
    x_body_lb3 = np.array([0.035, 0.002, -1.23, -179.93, -0.23, 0.50])
    H_body_lb3 = ssc_to_homo(x_body_lb3, ssc_in_radians=False)
    H_lb3_body = np.linalg.inv(H_body_lb3)

    # properties list for binary file reading
    properties = [('x', '<u2'), ('y', '<u2'), ('z', '<u2'), ('i', '<u1'), ('l', '<u1')]

    # Get gt files and days
    days = np.sort([v_f for v_f in listdir(join(data_path, velo_folder))])

    for d, day in enumerate(days):

        # Out folder
        day_out_folder = join(out_folder, day)
        if not exists(day_out_folder):
            makedirs(day_out_folder)

        # Day binary file
        hits_path = join(data_path, velo_folder, day, 'velodyne_hits.bin')

        # Init variables
        all_hits = []
        num_bytes = getsize(hits_path)
        current_bytes = 0
        frame_i = 0
        last_phi = -1
        t0 = time.time()
        last_display = t0

        with open(hits_path, 'rb') as f_bin:
            while True:

                ####################
                # Read packet header
                ####################

                # Verify packet
                magic = f_bin.read(8)
                if magic == b'':
                    break
                if not verify_magic(magic):
                    print('Could not verify magic')

                # Get header info
                num_hits = struct.unpack('<I', f_bin.read(4))[0]
                utime = struct.unpack('<Q', f_bin.read(8))[0]
                padding = f_bin.read(4)  # Do not convert padding (it is an int always equal to zero)

                ##################
                # Read binary hits
                ##################

                # Get face data
                packet_data = np.fromfile(f_bin, dtype=properties, count=num_hits)

                # Rescale point coordinates (5 mm resolution, -100 m offset)
                hits = np.vstack((packet_data['x'], packet_data['y'], packet_data['z'])).astype(np.float32).T
                hits *= 0.005
                hits += -100.0

                ##########################
                # Gather frame if complete
                ##########################

                # Azimuth of the last hit of this packet; wrap-around => new revolution
                phi = (np.arctan2(- hits[-1, 1], hits[-1, 0]) - np.pi / 2) % (2 * np.pi)
                if phi < last_phi:

                    # Stack all frame points
                    f_hits = np.vstack(all_hits)

                    # Save frame (named after the previous packet's timestamp)
                    frame_name = join(day_out_folder, '{:.0f}.ply'.format(last_utime))
                    write_ply(frame_name, [f_hits], ['x', 'y', 'z'])

                    # Display
                    t = time.time()
                    if (t - last_display) > 1.0:
                        last_display = t
                        message = '{:s}: frame {:7d} ({:6d} points)'
                        message += ' => {:5.1f}% and {:02d}:{:02d}:{:02d} remaining)'

                        # Predict remaining time
                        elapsed = t - t0
                        remaining = int(elapsed * num_bytes / current_bytes - elapsed)
                        hours = remaining // 3600
                        remaining = remaining - 3600 * hours
                        minutes = remaining // 60
                        seconds = remaining - 60 * minutes
                        print(message.format(day,
                                             frame_i,
                                             f_hits.shape[0],
                                             100 * current_bytes / num_bytes,
                                             hours, minutes, seconds))

                    # Update variables
                    frame_i += 1
                    all_hits = []

                ##############################
                # Append hits to current frame
                ##############################

                # Update last phi
                last_phi = phi
                last_utime = utime

                # Append new data
                all_hits.append(hits)

                # Count bytes already read
                current_bytes += 24 + 8 * num_hits

    return 0


def frames_to_ply(show_frames=False):
    """Convert pre-synchronized NCLT frames (velodyne_sync/*.bin) into PLY files
    under Data/NCLT/frames_ply/<day>/, skipping frames already converted.

    :param show_frames: if True, scatter-plot each converted frame in 3D.
    """

    # In files
    data_path = '../../Data/NCLT'
    velo_folder = 'velodyne_data'

    days = np.sort([d for d in listdir(join(data_path, velo_folder))])

    for day in days:

        # Out files
        ply_folder = join(data_path, 'frames_ply', day)
        if not exists(ply_folder):
            makedirs(ply_folder)

        day_path = join(data_path, velo_folder, day, 'velodyne_sync')
        f_names = np.sort([f for f in listdir(day_path) if f[-4:] == '.bin'])
        N = len(f_names)
        print('Reading', N, 'files')

        for f_i, f_name in enumerate(f_names):

            ply_name = join(ply_folder, f_name[:-4] + '.ply')
            if exists(ply_name):
                continue
            t1 = time.time()

            hits = []
            ints = []
            with open(join(day_path, f_name), 'rb') as f_bin:
                while True:
                    x_str = f_bin.read(2)
                    # End of file
                    if x_str == b'':
                        break
                    x = struct.unpack('<H', x_str)[0]
                    y = struct.unpack('<H', f_bin.read(2))[0]
                    z = struct.unpack('<H', f_bin.read(2))[0]
                    intensity = struct.unpack('B', f_bin.read(1))[0]
                    l = struct.unpack('B', f_bin.read(1))[0]
                    hits += [[x, y, z]]
                    ints += [intensity]

            ints = np.array(ints, dtype=np.uint8)
            hits = np.array(hits, dtype=np.float32)
            # Decode fixed-point coordinates: 5 mm resolution, -100 m offset
            hits *= 0.005
            hits += -100.0
            write_ply(ply_name, [hits, ints], ['x', 'y', 'z', 'intensity'])

            t2 = time.time()
            print('File {:s} {:d}/{:d} Done in {:.1f}s'.format(f_name, f_i, N, t2 - t1))

            if show_frames:
                fig = plt.figure()
                ax = fig.add_subplot(111, projection='3d')
                ax.scatter(hits[:, 0], hits[:, 1], -hits[:, 2], c=-hits[:, 2], s=5, linewidths=0)
                plt.show()

    return 0
def merge_day_pointclouds(show_day_trajectory=False, only_SLAM_nodes=False):
    """
    Recreate the whole day point cloud thks to gt pose
    Generate gt_annotation of mobile objects

    :param show_day_trajectory: plot each day's ground-truth trajectory.
    :param only_SLAM_nodes: keep only the lidar frames closest to SLAM graph nodes.

    Merges frames that pass near a hard-coded focus point p0 and writes debug
    PLY files ('testf.ply', 'test<day>.ply').
    """

    # In files
    data_path = '../../Data/NCLT'
    gt_folder = 'ground_truth'
    cov_folder = 'ground_truth_cov'

    # Transformation from body to velodyne frame (from NCLT paper)
    x_body_velo = np.array([0.002, -0.004, -0.957, 0.807, 0.166, -90.703])
    H_body_velo = ssc_to_homo(x_body_velo, ssc_in_radians=False)
    H_velo_body = np.linalg.inv(H_body_velo)
    x_body_lb3 = np.array([0.035, 0.002, -1.23, -179.93, -0.23, 0.50])
    H_body_lb3 = ssc_to_homo(x_body_lb3, ssc_in_radians=False)
    H_lb3_body = np.linalg.inv(H_body_lb3)

    # Get gt files and days
    gt_files = np.sort([gt_f for gt_f in listdir(join(data_path, gt_folder)) if gt_f[-4:] == '.csv'])
    cov_files = np.sort([cov_f for cov_f in listdir(join(data_path, cov_folder)) if cov_f[-4:] == '.csv'])
    days = [d[:-4].split('_')[1] for d in gt_files]

    # Load all gt poses (cached as .pkl next to each .csv)
    print('\nLoading days groundtruth poses...')
    t0 = time.time()
    gt_H = []
    gt_t = []
    for d, gt_f in enumerate(gt_files):

        t1 = time.time()

        gt_pkl_file = join(data_path, gt_folder, gt_f[:-4] + '.pkl')
        if exists(gt_pkl_file):
            # Read pkl
            with open(gt_pkl_file, 'rb') as f:
                day_gt_t, day_gt_H = pickle.load(f)
        else:
            # File paths
            gt_csv = join(data_path, gt_folder, gt_f)

            # Load gt
            gt = np.loadtxt(gt_csv, delimiter=',')

            # Convert gt to homogenous rotation/translation matrix
            day_gt_t = gt[:, 0]
            day_gt_H = ssc_to_homo(gt[:, 1:])

            # Save pickle
            with open(gt_pkl_file, 'wb') as f:
                pickle.dump([day_gt_t, day_gt_H], f)

        t2 = time.time()
        print('{:s} {:d}/{:d} Done in {:.1f}s'.format(gt_f, d, gt_files.shape[0], t2 - t1))

        gt_t += [day_gt_t]
        gt_H += [day_gt_H]

        if show_day_trajectory:

            cov_csv = join(data_path, cov_folder, cov_files[d])
            cov = np.loadtxt(cov_csv, delimiter=',')
            t_cov = cov[:, 0]
            t_cov_bool = np.logical_and(t_cov > np.min(day_gt_t), t_cov < np.max(day_gt_t))
            t_cov = t_cov[t_cov_bool]

            # Note: Interpolation is not needed, this is done as a convinience
            interp = scipy.interpolate.interp1d(day_gt_t, day_gt_H[:, :3, 3], kind='nearest', axis=0)
            node_poses = interp(t_cov)

            plt.figure()
            plt.scatter(day_gt_H[:, 1, 3], day_gt_H[:, 0, 3], 1, c=-day_gt_H[:, 2, 3], linewidth=0)
            plt.scatter(node_poses[:, 1], node_poses[:, 0], 1, c=-node_poses[:, 2], linewidth=5)
            plt.axis('equal')
            plt.title('Ground Truth Position of Nodes in SLAM Graph')
            plt.xlabel('East (m)')
            plt.ylabel('North (m)')
            plt.colorbar()
            plt.show()

    t2 = time.time()
    print('Done in {:.1f}s\n'.format(t2 - t0))

    # Out files
    out_folder = join(data_path, 'day_ply')
    if not exists(out_folder):
        makedirs(out_folder)

    # Focus on a particular point
    p0 = np.array([-220, -527, 12])
    center_radius = 10.0
    point_radius = 50.0

    # Loop on days
    for d, day in enumerate(days):

        #if day != '2012-02-05':
        #    continue

        day_min_t = gt_t[d][0]
        day_max_t = gt_t[d][-1]

        frames_folder = join(data_path, 'frames_ply', day)
        f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply'])

        # If we want, load only SLAM nodes
        if only_SLAM_nodes:

            # Load node timestamps
            cov_csv = join(data_path, cov_folder, cov_files[d])
            cov = np.loadtxt(cov_csv, delimiter=',')
            t_cov = cov[:, 0]
            t_cov_bool = np.logical_and(t_cov > day_min_t, t_cov < day_max_t)
            t_cov = t_cov[t_cov_bool]

            # Find closest lidar frames
            t_cov = np.expand_dims(t_cov, 1)
            diffs = np.abs(t_cov - f_times)
            inds = np.argmin(diffs, axis=1)
            f_times = f_times[inds]

        # Is this frame in gt
        f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t)
        f_times = f_times[f_t_bool]

        # Interpolation gt poses to frame timestamps
        interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0)
        frame_poses = interp(f_times)

        N = len(f_times)
        world_points = []
        world_frames = []
        world_frames_c = []
        print('Reading', day, ' => ', N, 'files')
        for f_i, f_t in enumerate(f_times):

            t1 = time.time()

            #########
            # GT pose
            #########

            H = frame_poses[f_i].astype(np.float32)
            # s = '\n'
            # for cc in H:
            #     for c in cc:
            #         s += '{:5.2f} '.format(c)
            #     s += '\n'
            # print(s)

            #############
            # Focus check
            #############

            if np.linalg.norm(H[:3, 3] - p0) > center_radius:
                continue

            ###################################
            # Local frame coordinates for debug
            ###################################

            # Create artificial frames
            x = np.linspace(0, 1, 50, dtype=np.float32)
            points = np.hstack((np.vstack((x, x*0, x*0)), np.vstack((x*0, x, x*0)), np.vstack((x*0, x*0, x)))).T
            colors = ((points > 0.1).astype(np.float32) * 255).astype(np.uint8)
            hpoints = np.hstack((points, np.ones_like(points[:, :1])))
            hpoints = np.matmul(hpoints, H.T)
            hpoints[:, 3] *= 0
            world_frames += [hpoints[:, :3]]
            world_frames_c += [colors]

            #######################
            # Load velo point cloud
            #######################

            # Load frame ply file
            f_name = '{:.0f}.ply'.format(f_t)
            data = read_ply(join(frames_folder, f_name))
            points = np.vstack((data['x'], data['y'], data['z'])).T
            #intensity = data['intensity']

            # Move to world coordinates; 4th column becomes a time feature
            hpoints = np.hstack((points, np.ones_like(points[:, :1])))
            hpoints = np.matmul(hpoints, H.T)
            hpoints[:, 3] *= 0
            hpoints[:, 3] += np.sqrt(f_t - f_times[0])

            # focus check
            focus_bool = np.linalg.norm(hpoints[:, :3] - p0, axis=1) < point_radius
            hpoints = hpoints[focus_bool, :]

            world_points += [hpoints]

            t2 = time.time()
            print('File {:s} {:d}/{:d} Done in {:.1f}s'.format(f_name, f_i, N, t2 - t1))

        if len(world_points) < 2:
            continue

        world_points = np.vstack(world_points)

        ###### DEBUG
        world_frames = np.vstack(world_frames)
        world_frames_c = np.vstack(world_frames_c)
        write_ply('testf.ply',
                  [world_frames, world_frames_c],
                  ['x', 'y', 'z', 'red', 'green', 'blue'])
        ###### DEBUG

        print(world_points.shape, world_points.dtype)

        # Subsample merged frames
        # world_points, features = grid_subsampling(world_points[:, :3],
        #                                           features=world_points[:, 3:],
        #                                           sampleDl=0.1)
        features = world_points[:, 3:]
        world_points = world_points[:, :3]

        print(world_points.shape, world_points.dtype)
        write_ply('test' + day + '.ply',
                  [world_points, features],
                  ['x', 'y', 'z', 't'])

        # Generate gt annotations

        # Subsample day ply (for visualization)

        # Save day ply

        # a = 1/0


def local_PCA(points):
    """Eigen-decomposition of the covariance of a neighborhood.

    :param points: [N, 3] array.
    :return: (eigenvalues, eigenvectors) from np.linalg.eigh — ascending order,
        so eigenvectors[:, 0] is the normal direction of a planar patch.
    """

    # Compute the barycenter
    center = np.mean(points, axis=0)

    # Centered clouds
    points_c = points - center

    # Covariance matrix
    C = (points_c.T).dot(points_c) / points.shape[0]

    # Eigenvalues
    return np.linalg.eigh(C)


def estimate_normals_planarity_debug(cloud):
    """
    Custom function that estimates normals and planarity of lidar frames,
    using polar coordinates neighborhoods. Debug variant: also compares with the
    C++ implementation (polar_normals) and dumps 'ttt_xyz.ply' / 'ttt_rtp.ply'.
    :param cloud: [N, 3] array of points.
    :return: sphericity scores (normals are written to the debug PLY).
    """

    # Rescale for numerical stability
    # FIX: the timer list initialization below was commented out, so every
    # subsequent `t += [time.time()]` raised NameError. 9 timestamps are needed
    # for the 8 printed intervals.
    t = [time.time()]

    # Get point cloud
    points = cloud.astype(np.float32)
    normals0, planarity, linearity = polar_normals(points.astype(np.float32), verbose=1)
    scores0 = planarity + linearity
    t += [time.time()]

    print(normals0.dtype, normals0.shape)
    print(scores0.dtype, scores0.shape)

    # Transform to polar coordinates
    polar_points = cart2pol(points)
    t += [time.time()]

    # Define search radius in l1 metric. Vertical angular resolution of HDL32 is 1.29
    angular_res = 1.29 * np.pi / 180
    polar_r = 1.5 * angular_res

    # Define horizontal scale (smaller distance for the neighbor in horizontal direction)
    horizontal_scale = 0.5

    # Use log of range so that neighbor radius is proportional to the range.
    range_scale = 4.0
    polar_points[:, 0] = np.log(polar_points[:, 0]) * polar_r / (np.log((1 + polar_r) / (1 - polar_r)) * range_scale)

    # Apply horizontal scale
    polar_points[:, 2] *= 1 / horizontal_scale
    t += [time.time()]

    # Create 2d KDTree to search lidar neighborhoods
    polar_tree = KDTree(polar_points, metric='l2')
    t += [time.time()]

    # Find neighbors
    all_neighb_inds = polar_tree.query_radius(polar_points, polar_r)
    t += [time.time()]

    # Rescale everything
    # polar_points[:, 2] *= horizontal_scale
    # polar_points[:, 0] = np.exp(polar_points[:, 0] * np.log((1 + polar_r) / (1 - polar_r)) * range_scale / polar_r)

    # Compute covariance matrices
    all_eigenvalues = np.empty(polar_points.shape, dtype=np.float32)
    all_eigenvectors = np.empty((polar_points.shape[0], 3, 3), dtype=np.float32)
    for i, neighb_inds in enumerate(all_neighb_inds):
        all_eigenvalues[i, :], all_eigenvectors[i, :, :] = local_PCA(points[neighb_inds, :])
    t += [time.time()]

    # Compute normals and planarity
    normals = all_eigenvectors[:, :, 0]
    sphericity = 1 - all_eigenvalues[:, 0] / (all_eigenvalues[:, 2] + 1e-9)
    t += [time.time()]

    # Choose random point for showing
    rand_inds = np.random.randint(polar_points.shape[0], size=100)
    features = np.zeros_like(polar_points[:, 2])
    for ri, rand_id in enumerate(rand_inds):
        features[all_neighb_inds[rand_id]] = ri
        features[rand_id] = 2 * len(rand_inds)

    write_ply('ttt_xyz.ply',
              [points, normals, features, sphericity, scores0],
              ['x', 'y', 'z', 'nx', 'ny', 'nz', 'f', 'score', 'cpp_score'])

    # polar_points[:, 1] *= 180 / np.pi
    # polar_points[:, 2] *= 180 / np.pi
    #polar_points[:, 0] = np.exp(polar_points[:, 0] * np.log((1 + polar_r) / (1 - polar_r)) * range_scale / polar_r)
    polar_points = polar_points[:, [2, 1, 0]]

    write_ply('ttt_rtp.ply',
              [polar_points, polar_points[:, 1] * 0, features],
              ['x', 'y', 'z', 'i', 'f'])

    # Filter outlier from ray/edges

    # Assign normals to pointcloud structure
    #cloud.normals = o3d.utility.Vector3dVector(normals)
    t += [time.time()]

    # Display timings
    print('\n*****************\n')
    print('Validation timings:')
    i = 0
    print('C++ ....... {:.1f}s'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('polar ..... {:.1f}s'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('scale ... {:.1f}s'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('Tree ...... {:.1f}s'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('neighb .... {:.1f}s'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('PCA ...... {:.1f}s'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('features . {:.1f}s'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('Assign ... {:.1f}s'.format(1000 * (t[i + 1] - t[i])))
    print('\n*****************\n')

    return sphericity
def estimate_normals_planarity(cloud):
    """
    Custom function that estimates normals and planarity of lidar frames,
    using polar coordinates neighborhoods.
    :param cloud: Open3D PointCloud.
    :return: scores = planarity + linearity (normals are modified in place).
    """

    # Get point cloud
    points = np.asarray(cloud.points)
    normals, planarity, linearity = polar_normals(points.astype(np.float32), verbose=1)

    # FIX: the function used to `return scores` without ever defining it
    # (NameError at runtime). Mirror estimate_normals_planarity_debug, where
    # the combined score is planarity + linearity.
    scores = planarity + linearity

    # Assign normals to pointcloud structure
    cloud.normals = o3d.utility.Vector3dVector(normals)

    return scores


def gaussian_conv_filter(dimension=3, size=5):
    """Build an unnormalized Gaussian kernel as a torch tensor.

    :param dimension: 2, 3 or 4 (number of kernel axes).
    :param size: kernel width along each axis.
    :return: torch.FloatTensor of shape (size,) * dimension, value 1.0 at the center.
    :raises ValueError: for unsupported dimensions.
    """

    # Sigma according to size
    sig = (size/2 - 0.5) / 2
    eps = 1e-6

    # Get coordinates centered on 0 (e.g. [-2, -1, 0, 1, 2] for size=5)
    coords = np.arange(-size/2 + 0.5, size/2, 1.0)

    if dimension == 2:
        x, y = np.meshgrid(coords, coords)
        sq_r = x ** 2 + y ** 2
    elif dimension == 3:
        x, y, z = np.meshgrid(coords, coords, coords)
        sq_r = x ** 2 + y ** 2 + z ** 2
    elif dimension == 4:
        x, y, z, t = np.meshgrid(coords, coords, coords, coords)
        sq_r = x ** 2 + y ** 2 + z ** 2 + t ** 2
    else:
        raise ValueError('Unsupported dimension (max is 4)')

    return torch.exp(-torch.from_numpy(sq_r.astype(np.float32)) / (2 * sig ** 2 + eps))


def normal_filtering(normals, debug=False):
    """Per-normal weights favoring rare normal directions.

    Histograms normals on a 0.05-voxel grid, Gaussian-smooths the counts, and
    returns weight 1.0 for normals falling in the top-20% most populated bins,
    0.0 otherwise.

    :param normals: [N, 3] array of normals.
    :param debug: if True, dump the histogram cloud to a PLY and abort
        (the `1/0` below is a deliberate debug stop).
    :return: [N] float32 weights.
    """

    # Discretise the sphere in carthesian coordiantes to avoid the pole reolution problem
    voxel_size = 0.05

    # Compute voxel indice for each point
    grid_indices = (np.floor(normals / voxel_size)).astype(int)

    # Limits of the grid
    min_grid_indices = np.amin(grid_indices, axis=0)
    max_grid_indices = np.amax(grid_indices, axis=0)

    # Number of cells in each direction
    deltaX, deltaY, deltaZ = max_grid_indices - min_grid_indices + 1

    # Relocate indices
    grid_indices -= min_grid_indices

    # Scalar equivalent to grid indices
    scalar_indices = grid_indices[:, 0] + grid_indices[:, 1] * deltaX + grid_indices[:, 2] * deltaX * deltaY

    # np.unique with both flags returns (unique, inverse, counts) in this order
    unique_inds, inverse, counts = np.unique(scalar_indices, return_counts=True, return_inverse=True)

    # Get counts in a 3D matrix
    unique_z = unique_inds // (deltaX * deltaY)
    unique_inds -= unique_z * deltaX * deltaY
    unique_y = unique_inds // deltaX
    unique_x = unique_inds - unique_y * deltaX
    count_matrix = np.zeros((deltaX, deltaY, deltaZ), dtype=np.float32)
    count_matrix[unique_x, unique_y, unique_z] += counts

    # Smooth them with a gaussian filter convolution (weights frozen, sum 1)
    torch_conv = torch.nn.Conv3d(1, 1, kernel_size=5, stride=1, bias=False)
    torch_conv.weight.requires_grad_(False)
    torch_conv.weight *= 0
    torch_conv.weight += gaussian_conv_filter(3, 5)
    torch_conv.weight *= torch.sum(torch_conv.weight) ** -1
    count_matrix = np.expand_dims(count_matrix, 0)
    count_matrix = np.expand_dims(count_matrix, 0)
    torch_count = torch.from_numpy(count_matrix)
    torch_count = torch.nn.functional.pad(torch_count, [2, 2, 2, 2, 2, 2])
    smooth_counts = torch.squeeze(torch_conv(torch_count))
    smooth_counts = smooth_counts.numpy()[unique_x, unique_y, unique_z]

    #################################################
    # Create weight according to the normal direction
    #################################################

    # Only 20% of the normals bins are kept For the rest, we use weights based on ditances
    weights = (smooth_counts > np.percentile(smooth_counts, 80)).astype(np.float32)

    # Show histogram in a spherical point cloud
    if debug:
        n_cloud = np.vstack((unique_x, unique_y, unique_z)).astype(np.float32).T
        n_cloud = (n_cloud + min_grid_indices.astype(np.float32) + 0.5) * voxel_size
        #n_cloud = n_cloud / np.linalg.norm(n_cloud, axis=1, keepdims=True)
        write_ply('nnn_NORMAL_HIST.ply',
                  [n_cloud, smooth_counts],
                  ['x', 'y', 'z', 'counts'])
        # HACK: intentional crash to stop after dumping the debug cloud
        a = 1/0

    return weights[inverse]


def load_gt_poses(gt_path, only_day_1=False):
    """Load NCLT ground-truth poses for every day found in gt_path.

    CSV files are parsed once and cached as pickles next to them.

    :param gt_path: folder containing groundtruth_<day>.csv files.
    :param only_day_1: stop after the first day.
    :return: (gt_t, gt_H) — lists of per-day timestamp arrays and [N, 4, 4] pose arrays.
    """

    gt_files = np.sort([gt_f for gt_f in listdir(gt_path) if gt_f[-4:] == '.csv'])

    gt_H = []
    gt_t = []
    for d, gt_f in enumerate(gt_files):

        t1 = time.time()

        gt_pkl_file = join(gt_path, gt_f[:-4] + '.pkl')
        if exists(gt_pkl_file):
            # Read pkl
            with open(gt_pkl_file, 'rb') as f:
                day_gt_t, day_gt_H = pickle.load(f)
        else:
            # File paths
            gt_csv = join(gt_path, gt_f)

            # Load gt
            gt = np.loadtxt(gt_csv, delimiter=',')

            # Convert gt to homogenous rotation/translation matrix
            day_gt_t = gt[:, 0]
            day_gt_H = ssc_to_homo(gt[:, 1:])

            # Save pickle
            with open(gt_pkl_file, 'wb') as f:
                pickle.dump([day_gt_t, day_gt_H], f)

        t2 = time.time()
        print('{:s} {:d}/{:d} Done in {:.1f}s'.format(gt_f, d, gt_files.shape[0], t2 - t1))

        gt_t += [day_gt_t]
        gt_H += [day_gt_H]

        if only_day_1 and d > -1:
            break

    return gt_t, gt_H


def get_area_frames(days, gt_t, gt_H, raw_path, area_center, area_radius, only_day_1=False):
    """For each day, collect the timestamps of the frames whose GT pose lies
    within area_radius of area_center, then trim to the contiguous run of
    timestamps (no large time gaps) around the closest frame.

    :param days: list of day names.
    :param gt_t: per-day timestamp arrays (from load_gt_poses).
    :param gt_H: per-day [N, 4, 4] pose arrays.
    :param raw_path: folder containing per-day frame PLY folders.
    :param area_center: [3] target point.
    :param area_radius: selection radius around area_center.
    :param only_day_1: stop after the first day.
    :return: list of per-day np.float64 timestamp arrays.
    """

    # Loop on days
    day_f_times = []
    for d, day in enumerate(days):

        # Get frame timestamps
        frames_folder = join(raw_path, day)
        f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply'])

        # Ground truth does not cover all frames
        day_min_t = gt_t[d][0]
        day_max_t = gt_t[d][-1]
        f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t)
        f_times = f_times[f_t_bool]

        # Interpolation gt poses to frame timestamps
        interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0)
        frame_poses = interp(f_times)

        # Closest frame to picked point
        closest_i = 0
        closest_d = 1e6

        new_f_times = []
        for f_i, f_t in enumerate(f_times):

            # GT pose
            H = frame_poses[f_i].astype(np.float32)

            # Focus check
            f_dist = np.linalg.norm(H[:3, 3] - area_center)
            if f_dist > area_radius:
                continue

            # Save closest frame
            if (f_dist < closest_d):
                closest_d = f_dist
                closest_i = len(new_f_times)

            # Append frame to candidates
            new_f_times.append(f_t)

        # Filter to only get subsequent frames: find jumps larger than 5x the
        # median gap and keep the run containing the closest frame
        new_f_times = np.array(new_f_times, dtype=np.float64)
        gaps = new_f_times[1:] - new_f_times[:-1]
        med_gap = np.median(gaps[:50])
        jumps = np.sort(np.where(gaps > 5 * med_gap)[0])
        i0 = 0
        i1 = len(new_f_times)
        for j in jumps:
            if j + 1 < closest_i:
                i0 = j + 1
        for j in jumps[::-1]:
            if j + 1 > closest_i:
                i1 = j + 1
        day_f_times.append(new_f_times[i0:i1])

        if only_day_1 and d > -1:
            break

    return day_f_times
Append frame to candidates new_f_times.append(f_t) # Filter to only get subsequent frames new_f_times = np.array(new_f_times, dtype=np.float64) gaps = new_f_times[1:] - new_f_times[:-1] med_gap = np.median(gaps[:50]) jumps = np.sort(np.where(gaps > 5 * med_gap)[0]) i0 = 0 i1 = len(new_f_times) for j in jumps: if j + 1 < closest_i: i0 = j + 1 for j in jumps[::-1]: if j + 1 > closest_i: i1 = j + 1 day_f_times.append(new_f_times[i0:i1]) if only_day_1 and d > -1: break return day_f_times def test_icp_registration(): """ Test ICP registration Use GT to extract a small interesting region. """ ############ # Parameters ############ # In files data_path = '../../Data/NCLT' gt_folder = 'ground_truth' cov_folder = 'ground_truth_cov' # Transformation from body to velodyne frame (from NCLT paper) x_body_velo = np.array([0.002, -0.004, -0.957, 0.807, 0.166, -90.703]) H_body_velo = ssc_to_homo(x_body_velo, ssc_in_radians=False) H_velo_body = np.linalg.inv(H_body_velo) x_body_lb3 = np.array([0.035, 0.002, -1.23, -179.93, -0.23, 0.50]) H_body_lb3 = ssc_to_homo(x_body_lb3, ssc_in_radians=False) H_lb3_body = np.linalg.inv(H_body_lb3) # Out files out_folder = join(data_path, 'day_ply') if not exists(out_folder): makedirs(out_folder) # Get gt files and days gt_files = np.sort([gt_f for gt_f in listdir(join(data_path, gt_folder)) if gt_f[-4:] == '.csv']) cov_files = np.sort([cov_f for cov_f in listdir(join(data_path, cov_folder)) if cov_f[-4:] == '.csv']) days = [d[:-4].split('_')[1] for d in gt_files] ############### # Load GT poses ############### print('\nLoading days groundtruth poses...') t0 = time.time() gt_H = [] gt_t = [] for d, gt_f in enumerate(gt_files): t1 = time.time() gt_pkl_file = join(data_path, gt_folder, gt_f[:-4] + '.pkl') if exists(gt_pkl_file): # Read pkl with open(gt_pkl_file, 'rb') as f: day_gt_t, day_gt_H = pickle.load(f) else: # File paths gt_csv = join(data_path, gt_folder, gt_f) # Load gt gt = np.loadtxt(gt_csv, delimiter=',') # Convert gt to homogenous 
rotation/translation matrix day_gt_t = gt[:, 0] day_gt_H = ssc_to_homo(gt[:, 1:]) # Save pickle with open(gt_pkl_file, 'wb') as f: pickle.dump([day_gt_t, day_gt_H], f) t2 = time.time() print('{:s} {:d}/{:d} Done in {:.1f}s'.format(gt_f, d, gt_files.shape[0], t2 - t1)) gt_t += [day_gt_t] gt_H += [day_gt_H] if d > -1: break t2 = time.time() print('Done in {:.1f}s\n'.format(t2 - t0)) ######################## # Get lidar frames times ######################## # Focus on a particular point p0 = np.array([-220, -527, 12]) center_radius = 10.0 point_radius = 50.0 print('\nGet timestamps in focused area...') t0 = time.time() # Loop on days day_f_times = [] for d, day in enumerate(days): day_min_t = gt_t[d][0] day_max_t = gt_t[d][-1] frames_folder = join(data_path, 'raw_ply', day) f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply']) # Is this frame in gt f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t) f_times = f_times[f_t_bool] # Interpolation gt poses to frame timestamps interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0) frame_poses = interp(f_times) N = len(f_times) new_f_times = [] for f_i, f_t in enumerate(f_times): t1 = time.time() # GT pose H = frame_poses[f_i].astype(np.float32) # Focus check if np.linalg.norm(H[:3, 3] - p0) > center_radius: continue new_f_times.append(f_t) # DEBUGGGGGG new_f_times = new_f_times[5:-5] day_f_times.append(np.array(new_f_times, dtype=np.float64)) if d > -1: break t2 = time.time() print('Done in {:.1f}s\n'.format(t2 - t0)) ########################### # coarse map with pt2pt icp ########################### for d, day in enumerate(days): frames_folder = join(data_path, 'raw_ply', day) N = len(day_f_times[d]) print('Reading', day, ' => ', N, 'files') # Load first frame as map last_transform = np.eye(4) last_cloud = None threshold = 0.3 score_thresh = 0.99 voxel_size = 0.1 transform_list = [] cloud_list = [] cloud_map = None full_map = None full_map_t = None 
verbose = 1 t = [time.time()] for f_i, f_t in enumerate(day_f_times[d]): ####################### # Load velo point cloud ####################### t = [time.time()] # Load frame ply file f_name = '{:.0f}.ply'.format(f_t) cloud = o3d.io.read_point_cloud(join(frames_folder, f_name)) t += [time.time()] # Cloud normals and planarity scores = estimate_normals_planarity(cloud) if f_i < 1: last_cloud = cloud cloud_map = cloud continue t += [time.time()] # Remove low score for fitting cloud_down = o3d.geometry.PointCloud() cloud_down.points = o3d.utility.Vector3dVector(np.asarray(cloud.points)[scores > score_thresh, :]) cloud_down.normals = o3d.utility.Vector3dVector(np.asarray(cloud.normals)[scores > score_thresh, :]) # Downsample target cloud_down = cloud_down.voxel_down_sample(voxel_size) # if f_i > 2: # # np.asarray(last_cloud.normals).astype(np.float32) # new_scores = np.ones_like(np.asarray(cloud_down.points).astype(np.float32))[:, 0] # H, rms = pt2pl_icp(np.asarray(cloud_down.points).astype(np.float32), # np.asarray(last_cloud.points).astype(np.float32), # np.asarray(last_cloud.normals).astype(np.float32), # new_scores, # n_samples=1000, # max_pairing_dist=0.2, # max_iter=10, # minDiffRMS=0.001) # # print(H) # print(rms) # a = 1 / 0 t += [time.time()] # Measure initial ICP metrics if verbose == 2: reg_init = o3d.registration.evaluate_registration(cloud_down, last_cloud, threshold, last_transform) t += [time.time()] else: reg_init = None # Apply ICP reg_pt2pl = o3d.registration.registration_icp( cloud_down, last_cloud, threshold, last_transform, o3d.registration.TransformationEstimationPointToPlane()) t += [time.time()] # Print results if verbose == 2: print('ICP convergence:') print('fitness ...... {:7.4f} => {:7.4f}'.format(reg_init.fitness, reg_pt2pl.fitness)) print('inlier_rmse .. {:7.4f} => {:7.4f}'.format(reg_init.inlier_rmse, reg_pt2pl.inlier_rmse)) print('corresp_n .... 
{:7d} => {:7d}'.format(np.asarray(reg_init.correspondence_set).shape[0], np.asarray(reg_pt2pl.correspondence_set).shape[0])) # Apply transformation for the init of next step cloud_down.transform(reg_pt2pl.transformation) if verbose == 2: # Save init cloud # cloud_init = copy.deepcopy(cloud) # cloud_init.transform(last_transform) # write_ply('ttt_{:d}_init.ply'.format(f_i), # [np.asarray(cloud_init.points)], # ['x', 'y', 'z']) # Save result cloud cloud.transform(reg_pt2pl.transformation) write_ply('ttt_{:d}_reg.ply'.format(f_i), [np.asarray(cloud.points)], ['x', 'y', 'z']) t += [time.time()] # Update sub map cloud_map.points.extend(cloud.points) cloud_map = cloud_map.voxel_down_sample(voxel_size=voxel_size) write_ply('tt_sub_map.ply'.format(f_i), [np.asarray(cloud_map.points)], ['x', 'y', 'z']) # Update full map if full_map is None: full_map = copy.deepcopy(cloud_down) full_map_t = np.full(shape=(np.asarray(cloud_down.points).shape[0],), fill_value=f_t - day_f_times[d][0], dtype=np.float64) else: full_map.points.extend(cloud_down.points) full_map_t = np.hstack((full_map_t, np.full(shape=(np.asarray(cloud_down.points).shape[0],), fill_value=f_t - day_f_times[d][0], dtype=np.float64))) write_ply('tt_full_map.ply'.format(f_i), [np.asarray(full_map.points), full_map_t], ['x', 'y', 'z', 't']) t += [time.time()] # Update variables last_cloud = cloud_down last_transform = reg_pt2pl.transformation transform_list += [reg_pt2pl.transformation] cloud_list += [np.asarray(cloud_down.points).astype(np.float32)] t += [time.time()] if verbose > 0: print('{:.0f} registered on {:.0f} in {:.1f}ms ({:d}/{:d})'.format(f_t, day_f_times[d][f_i - 1], 1000 * (t[-1] - t[0]), f_i, N)) # Display timings if verbose == 2: print('\n*********************') i = 0 print('Load ...... {:.1f}ms'.format(1000 * (t[i + 1] - t[i]))) i += 1 print('Normals ... {:.1f}ms'.format(1000 * (t[i + 1] - t[i]))) i += 1 print('Filter .... {:.1f}ms'.format(1000 * (t[i + 1] - t[i]))) i += 1 print('Eval ...... 
{:.1f}ms'.format(1000 * (t[i + 1] - t[i]))) i += 1 print('ICP ....... {:.1f}ms'.format(1000 * (t[i + 1] - t[i]))) i += 1 print('Transform . {:.1f}ms'.format(1000 * (t[i + 1] - t[i]))) i += 1 print('Save ...... {:.1f}ms'.format(1000 * (t[i + 1] - t[i]))) i += 1 print('Update .... {:.1f}ms'.format(1000 * (t[i + 1] - t[i]))) print('*********************\n') print('\n********************************************\n') # Save results full_map = np.vstack(cloud_list) times_list = [f_t - day_f_times[d][0] for f_t in day_f_times[d][1:]] full_map_t = np.vstack([np.full((cld.shape[0], 1), f_t, dtype=np.float64) for f_t, cld in zip(times_list, cloud_list)]) write_ply('tt_full_map.ply', [full_map, full_map_t], ['x', 'y', 'z', 't']) # TODO: # > Multithread this first path at a python level (use Pytorch?). No need for multitherad cpp wrapper # > Second path (refinement) with normals re-estimnated on the map # > Take motion distortion into account (in second path). # > Use graph optimization for loop closure and day merging a = 1 / 0 def bundle_icp_debug(verbose=2): """ Test ICP registration Use GT to extract a small interesting region. 
""" ############ # Parameters ############ # Path to data data_path = '../../Data/NCLT' gt_folder = 'ground_truth' raw_folder = 'raw_ply' days = np.sort([d for d in listdir(join(data_path, raw_folder))]) # Out files out_folder = join(data_path, 'day_ply') if not exists(out_folder): makedirs(out_folder) # Stride (nb of frames skipped for transformations) frame_stride = 2 # Bundle size (number of frames jointly optimized) and stride (nb of frames between each bundle start) bundle_size = 7 bundle_stride = bundle_size - 1 # Normal estimation parameters score_thresh = 0.99 # Pointcloud filtering parameters map_voxel_size = 0.05 frame_voxel_size = -0.05 # Group of frames saved together save_group = 100 ############### # Load GT poses ############### print('\nLoading days groundtruth poses...') t0 = time.time() gt_t, gt_H = load_gt_poses(join(data_path, gt_folder), only_day_1=True) t2 = time.time() print('Done in {:.1f}s\n'.format(t2 - t0)) ####################### # Get lidar frame times ####################### # Focus on a particular point p0 = np.array([-220, -527, 12]) R0 = 10.0 print('\nGet timestamps in focused area...') t0 = time.time() day_f_times = get_area_frames(days, gt_t, gt_H, join(data_path, raw_folder), p0, R0, only_day_1=True) t2 = time.time() print('Done in {:.1f}s\n'.format(t2 - t0)) ########################### # coarse map with pt2pl icp ########################### for d, day in enumerate(days): # List of transformation we are trying to optimize frames_folder = join(data_path, 'raw_ply', day) f_times = [f_t for f_t in day_f_times[d][::frame_stride]] transform_list = [np.eye(4) for _ in f_times] last_saved_frames = 0 FPS = 0 N = len(f_times) for b_i, bundle_i0 in enumerate(np.arange(0, len(f_times), bundle_stride)): #################### # Load bundle frames #################### t = [time.time()] if (bundle_i0 + bundle_size > len(f_times)): bundle_i0 = len(f_times) - bundle_size frame_pts = [] frame_norms = [] frame_w = [] for f_t in 
f_times[bundle_i0:bundle_i0+bundle_size]: # Load ply format points f_name = '{:.0f}.ply'.format(f_t) data = read_ply(join(frames_folder, f_name)) points = np.vstack((data['x'], data['y'], data['z'])).T t += [time.time()] # Get normals normals, planarity, linearity = polar_normals(points, radius=1.5, h_scale=0.5) # Remove low quality normals for fitting points = points[norm_scores > score_thresh] normals = normals[norm_scores > score_thresh] norm_scores = (norm_scores[norm_scores > score_thresh] - score_thresh) / (1 - score_thresh) t += [time.time()] # Subsample to reduce number of points if frame_voxel_size > 0: # grid supsampling points, normals = grid_subsampling(points, features=normals, sampleDl=map_voxel_size) # Renormalize normals normals = normals / np.linalg.norm(normals, axis=1, keepdims=True) # Filter out points according to main normal directions (NOt necessary if normals are better computed) bool_filter = normal_filtering(normals) > 0.5 points = points[bool_filter] normals = normals[bool_filter] norm_scores = norm_scores[bool_filter] t += [time.time()] # Compute score for each component of rotations / translation # Weights according to distance the futher, the higher (square rule because points lies on surfaces) #rot_scores = np.expand_dims(norm_scores, 1) * np.cross(points, normals, axis=1) #weights = np.hstack((rot_scores, -rot_scores)) weights = np.expand_dims(norm_scores, 1) # Gather frames data frame_pts.append(points) frame_norms.append(normals) frame_w.append(weights) t += [time.time()] if verbose == 3: dt = np.array(t[1:]) - np.array(t[:-1]) dt = dt.reshape(bundle_size, -1) timing_names = ['Load', 'Normals', 'Filter', 'Append'] s = '' for t_name in timing_names: s += '{:^10s} '.format(t_name) s += '\n' for b in range(bundle_size): for t_i in range(len(timing_names)): s += '{:^10.1f} '.format(1000 * dt[b, t_i]) s += '\n' print(s) t = t[:1] t += [time.time()] ################## # Apply bundle ICP ################## # for b in range(bundle_size): # 
w_names = ['w{:d}'.format(i) for i in range(frame_w[b].shape[1])] # write_ply('bb_init_{:02d}.ply'.format(b), # [frame_pts[b], frame_w[b]], # ['x', 'y', 'z'] + w_names) bundle_H, bundle_rms, all_H = bundle_pt2pl_icp(frame_pts, frame_norms, frame_w, n_samples=1000, max_pairing_dist=0.2, max_iter=200, avg_steps=5) t += [time.time()] save_debug_frames = False if save_debug_frames: print(all_H.shape) bundle_inds = [] steps = [] all_pts = [] for s, HH in enumerate(all_H): for b, H in enumerate(HH): if b == 0: world_H = np.linalg.inv(H) else: world_H = np.eye(4) for bb in range(b, 0, -1): world_H = np.matmul(HH[bb], world_H) pts, clrs = frame_H_to_points(world_H, size=0.1) pts = pts.astype(np.float32) all_pts.append(pts) bundle_inds.append(pts[:, 0]*0+b) steps.append(pts[:, 0]*0+s) write_ply('bb_frames.ply', [np.vstack(all_pts), np.hstack(steps), np.hstack(bundle_inds)], ['x', 'y', 'z', 's', 'b']) debug_rms = False if debug_rms: fig = plt.figure('RMS') for b, b_rms in enumerate(bundle_rms): if b == 0: plt.plot(b_rms, '-', linewidth=2, label='{:d}-0'.format(bundle_size - 1)) else: plt.plot(b_rms, '-', linewidth=1, label='{:d}-{:d}'.format(b, b - 1)) plt.xlabel('steps') plt.ylabel('rms') #plt.legend(loc=1) plt.ylim(0, 0.3) all_H_inv = np.copy(all_H.transpose((0, 1, 3, 2))) all_H_inv[:, :, 3, :3] = 0 all_H_inv[:, :, :3, 3:] = -np.matmul(all_H_inv[:, :, :3, :3], all_H[:, :, :3, 3:]) dH = np.matmul(all_H[1:], all_H_inv[:-1]) dH = dH.transpose((1, 0, 2, 3)) plt.figure('dT') for b, b_dH in enumerate(dH): b_dT = np.linalg.norm(b_dH[:, :3, 3], axis=1) b_dT = running_mean(b_dT, 4) if b == 0: plt.plot(b_dT, '-', linewidth=2, label='{:d}-0'.format(bundle_size - 1)) else: plt.plot(b_dT, '-', linewidth=1, label='{:d}-{:d}'.format(b, b - 1)) plt.xlabel('steps') plt.ylabel('rms') #plt.legend(loc=1) plt.ylim(0, 0.05) plt.figure('dR') for b, b_dH in enumerate(dH): b_dR = np.arccos((np.trace(b_dH[:, :3, :3], axis1=1, axis2=2) - 1) / 2) b_dR = running_mean(b_dR, 4) if b == 0: plt.plot(b_dR, 
'-', linewidth=2, label='{:d}-0'.format(bundle_size - 1)) else: plt.plot(b_dR, '-', linewidth=1, label='{:d}-{:d}'.format(b, b - 1)) plt.xlabel('steps') plt.ylabel('rms') #plt.legend(loc=1) plt.ylim(0, 0.01) plt.show() a = 1/0 # Update transformations to world coordinates for b in range(bundle_size): world_H = np.eye(4) for bb in range(b, 0, -1): world_H = np.matmul(bundle_H[bb], world_H) world_H = np.matmul(transform_list[bundle_i0], world_H) transform_list[bundle_i0 + b] = world_H t += [time.time()] if verbose == 2: print('Bundle {:9.1f}ms / ICP {:.1f}ms => {:.1f} FPS'.format(1000 * (t[1] - t[0]), 1000 * (t[2] - t[1]), bundle_size / (t[2] - t[1]))) if verbose == 1: fmt_str = 'Bundle [{:3d},{:3d}] --- {:5.1f}% or {:02d}:{:02d}:{:02d} remaining at {:.1f}fps' if bundle_i0 == 0: FPS = bundle_size / (t[-1] - t[0]) else: FPS += (bundle_size / (t[-1] - t[0]) - FPS) / 10 remaining = int((N - (bundle_i0 + bundle_size)) / FPS) hours = remaining // 3600 remaining = remaining - 3600 * hours minutes = remaining // 60 seconds = remaining - 60 * minutes print(fmt_str.format(bundle_i0, bundle_i0 + bundle_size - 1, 100 * (bundle_i0 + bundle_size) / N, hours, minutes, seconds, FPS)) # Save groups of 100 frames together if (bundle_i0 > last_saved_frames + save_group + 1): all_points = [] all_traj_pts = [] all_traj_clrs = [] i0 = last_saved_frames i1 = i0 + save_group for i, world_H in enumerate(transform_list[i0: i1]): # Load ply format points f_name = '{:.0f}.ply'.format(f_times[i0 + i]) data = read_ply(join(frames_folder, f_name)) points = np.vstack((data['x'], data['y'], data['z'])).T # Apply transf world_pts = np.hstack((points, np.ones_like(points[:, :1]))) world_pts = np.matmul(world_pts, world_H.T) # Save frame world_pts[:, 3] = i0 + i all_points.append(world_pts) # also save trajectory traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1) traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i))) all_traj_pts.append(traj_pts.astype(np.float32)) 
all_traj_clrs.append(traj_clrs) last_saved_frames += save_group filename = join(out_folder, 'd_{:s}_{:05d}.ply'.format(day, i0)) write_ply(filename, [np.vstack(all_points)], ['x', 'y', 'z', 't']) filename = join(out_folder, 'd_{:s}_{:05d}_traj.ply'.format(day, i0)) write_ply(filename, [np.vstack(all_traj_pts), np.vstack(all_traj_clrs)], ['x', 'y', 'z', 't', 'red', 'green', 'blue']) ################# # Post processing ################# all_points = [] all_traj_pts = [] all_traj_clrs = [] i0 = last_saved_frames for i, world_H in enumerate(transform_list[i0:]): # Load ply format points f_name = '{:.0f}.ply'.format(f_times[i0 + i]) data = read_ply(join(frames_folder, f_name)) points = np.vstack((data['x'], data['y'], data['z'])).T # Apply transf world_pts = np.hstack((points, np.ones_like(points[:, :1]))) world_pts = np.matmul(world_pts, world_H.T) # Save frame world_pts[:, 3] = i0 + i all_points.append(world_pts) # also save trajectory traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1) traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i))) all_traj_pts.append(traj_pts.astype(np.float32)) all_traj_clrs.append(traj_clrs) last_saved_frames += save_group filename = join(out_folder, 'd_{:s}_{:05d}.ply'.format(day, i0)) write_ply(filename, [np.vstack(all_points)], ['x', 'y', 'z', 't']) filename = join(out_folder, 'd_{:s}_{:05d}_traj.ply'.format(day, i0)) write_ply(filename, [np.vstack(all_traj_pts), np.vstack(all_traj_clrs)], ['x', 'y', 'z', 't', 'red', 'green', 'blue']) def bundle_icp(frame_names, bundle_size=5, score_thresh=0.99, frame_voxel_size=-1, verbose=2): """ Test ICP registration Use GT to extract a small interesting region. 
""" ############ # Parameters ############ # Bundle stride (nb of frames between each bundle start) bundle_stride = bundle_size - 1 # Group of frames saved together save_group = 100 # List of transformation we are trying to optimize transform_list = [np.eye(4) for _ in frame_names] last_saved_frames = 0 FPS = 0 N = len(frame_names) for b_i, bundle_i0 in enumerate(np.arange(0, len(frame_names), bundle_stride)): #################### # Load bundle frames #################### t = [time.time()] if (bundle_i0 + bundle_size > N): bundle_i0 = N - bundle_size frame_pts = [] frame_norms = [] frame_w = [] for f_name in frame_names[bundle_i0:bundle_i0+bundle_size]: # Load ply format points data = read_ply(f_name) points = np.vstack((data['x'], data['y'], data['z'])).T # Get normals normals, planarity, linearity = polar_normals(points, radius=1.5, h_scale=0.5) norm_scores = planarity + linearity # Remove low quality normals for fitting points = points[norm_scores > score_thresh] normals = normals[norm_scores > score_thresh] norm_scores = (norm_scores[norm_scores > score_thresh] - score_thresh) / (1 - score_thresh) # Subsample to reduce number of points if frame_voxel_size > 0: # grid supsampling points, normals = grid_subsampling(points, features=normals, sampleDl=frame_voxel_size) # Renormalize normals normals = normals / np.linalg.norm(normals, axis=1, keepdims=True) # Filter out points according to main normal directions (NOt necessary if normals are better computed) bool_filter = normal_filtering(normals) > 0.5 points = points[bool_filter] normals = normals[bool_filter] norm_scores = norm_scores[bool_filter] # Compute score for each component of rotations / translation # Weights according to distance the futher, the higher (square rule because points lies on surfaces) #rot_scores = np.expand_dims(norm_scores, 1) * np.cross(points, normals, axis=1) #weights = np.hstack((rot_scores, -rot_scores)) weights = np.expand_dims(norm_scores, 1) # Gather frames data 
frame_pts.append(points) frame_norms.append(normals) frame_w.append(weights) t += [time.time()] ################## # Apply bundle ICP ################## bundle_H, bundle_rms, all_H = bundle_pt2pl_icp(frame_pts, frame_norms, frame_w, n_samples=1000, max_pairing_dist=0.2, max_iter=200, avg_steps=5) t += [time.time()] # Update transformations to world coordinates for b in range(bundle_size): world_H = np.eye(4) for bb in range(b, 0, -1): world_H = np.matmul(bundle_H[bb], world_H) world_H = np.matmul(transform_list[bundle_i0], world_H) transform_list[bundle_i0 + b] = world_H t += [time.time()] if verbose > 0: fmt_str = 'Bundle [{:3d},{:3d}] --- {:5.1f}% or {:02d}:{:02d}:{:02d} remaining at {:.1f}fps' if bundle_i0 == 0: FPS = bundle_size / (t[-1] - t[0]) else: FPS += (bundle_size / (t[-1] - t[0]) - FPS) / 10 remaining = int((N - (bundle_i0 + bundle_size)) / FPS) hours = remaining // 3600 remaining = remaining - 3600 * hours minutes = remaining // 60 seconds = remaining - 60 * minutes print(fmt_str.format(bundle_i0, bundle_i0 + bundle_size - 1, 100 * (bundle_i0 + bundle_size) / N, hours, minutes, seconds, FPS)) # Save groups of 100 frames together if (bundle_i0 > last_saved_frames + save_group + 1): all_points = [] all_traj_pts = [] all_traj_clrs = [] i0 = last_saved_frames i1 = i0 + save_group for i, world_H in enumerate(transform_list[i0: i1]): # Load ply format points data = read_ply(frame_names[i0 + i]) points = np.vstack((data['x'], data['y'], data['z'])).T # Apply transf world_pts = np.hstack((points, np.ones_like(points[:, :1]))) world_pts = np.matmul(world_pts, world_H.T) # Save frame world_pts[:, 3] = i0 + i all_points.append(world_pts) # also save trajectory traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1) traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i))) all_traj_pts.append(traj_pts.astype(np.float32)) all_traj_clrs.append(traj_clrs) last_saved_frames += save_group filename = 'debug_icp_{:05d}.ply'.format(i0) 
write_ply(filename, [np.vstack(all_points)], ['x', 'y', 'z', 't']) filename = 'debug_icp_{:05d}_traj.ply'.format(i0) write_ply(filename, [np.vstack(all_traj_pts), np.vstack(all_traj_clrs)], ['x', 'y', 'z', 't', 'red', 'green', 'blue']) ################# # Post processing ################# all_points = [] all_traj_pts = [] all_traj_clrs = [] i0 = last_saved_frames for i, world_H in enumerate(transform_list[i0:]): # Load ply format points data = read_ply(frame_names[i0 + i]) points = np.vstack((data['x'], data['y'], data['z'])).T # Apply transf world_pts = np.hstack((points, np.ones_like(points[:, :1]))) world_pts = np.matmul(world_pts, world_H.T) # Save frame world_pts[:, 3] = i0 + i all_points.append(world_pts) # also save trajectory traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1) traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i))) all_traj_pts.append(traj_pts.astype(np.float32)) all_traj_clrs.append(traj_clrs) last_saved_frames += save_group filename = 'debug_icp_{:05d}.ply'.format(i0) write_ply(filename, [np.vstack(all_points)], ['x', 'y', 'z', 't']) filename = 'debug_icp_{:05d}_traj.ply'.format(i0) write_ply(filename, [np.vstack(all_traj_pts), np.vstack(all_traj_clrs)], ['x', 'y', 'z', 't', 'red', 'green', 'blue']) return transform_list def bundle_slam(verbose=1): ############ # Parameters ############ # Path to data data_path = '../../Data/NCLT' gt_folder = 'ground_truth' raw_folder = 'raw_ply' days = np.sort([d for d in listdir(join(data_path, raw_folder))]) # Out files out_folder = join(data_path, 'day_ply') if not exists(out_folder): makedirs(out_folder) # Stride (nb of frames skipped for transformations) frame_stride = 2 # Bundle size (number of frames jointly optimized) and stride (nb of frames between each bundle start) bundle_size = 7 bundle_stride = bundle_size - 1 # Normal estimation parameters score_thresh = 0.99 # Pointcloud filtering parameters map_voxel_size = 0.05 frame_voxel_size = 0.05 # Group of frames saved 
together save_group = 100 ############### # Load GT poses ############### print('\nLoading days groundtruth poses...') t0 = time.time() gt_t, gt_H = load_gt_poses(join(data_path, gt_folder), only_day_1=True) t2 = time.time() print('Done in {:.1f}s\n'.format(t2 - t0)) ####################### # Get lidar frame times ####################### # Focus on a particular point p0 = np.array([-220, -527, 12]) R0 = 20.0 print('\nGet timestamps in focused area...') t0 = time.time() day_f_times = get_area_frames(days, gt_t, gt_H, join(data_path, raw_folder), p0, R0, only_day_1=True) t2 = time.time() print('Done in {:.1f}s\n'.format(t2 - t0)) ########################### # coarse map with pt2pl icp ########################### for d, day in enumerate(days): # List of transformation we are trying to optimize frames_folder = join(data_path, 'raw_ply', day) f_times = [f_t for f_t in day_f_times[d][::frame_stride]] transform_list = [np.eye(4) for _ in f_times] last_saved_frames = 0 FPS = 0 N = len(f_times) for b_i, bundle_i0 in enumerate(np.arange(0, len(f_times), bundle_stride)): #################### # Load bundle frames #################### t = [time.time()] if (bundle_i0 + bundle_size > len(f_times)): bundle_i0 = len(f_times) - bundle_size frame_pts = [] frame_norms = [] frame_w = [] for f_t in f_times[bundle_i0:bundle_i0+bundle_size]: # Load ply format points f_name = '{:.0f}.ply'.format(f_t) data = read_ply(join(frames_folder, f_name)) points = np.vstack((data['x'], data['y'], data['z'])).T estimate_normals_planarity_debug(points) a = 1/0 t += [time.time()] # Get normals normals, planarity, linearity = polar_normals(points, radius=1.5, h_scale=0.5) # Remove low quality normals for fitting points = points[norm_scores > score_thresh] normals = normals[norm_scores > score_thresh] norm_scores = (norm_scores[norm_scores > score_thresh] - score_thresh) / (1 - score_thresh) t += [time.time()] # Subsample to reduce number of points if frame_voxel_size > 0: # grid supsampling points, 
normals = grid_subsampling(points, features=normals, sampleDl=map_voxel_size) # Renormalize normals normals = normals / np.linalg.norm(normals, axis=1, keepdims=True) # Filter out points according to main normal directions (NOt necessary if normals are better computed) bool_filter = normal_filtering(normals) > 0.5 points = points[bool_filter] normals = normals[bool_filter] norm_scores = norm_scores[bool_filter] t += [time.time()] # Compute score for each component of rotations / translation # Weights according to distance the futher, the higher (square rule because points lies on surfaces) #rot_scores = np.expand_dims(norm_scores, 1) * np.cross(points, normals, axis=1) #weights = np.hstack((rot_scores, -rot_scores)) weights = np.expand_dims(norm_scores, 1) # Gather frames data frame_pts.append(points) frame_norms.append(normals) frame_w.append(weights) t += [time.time()] if verbose == 3: dt = np.array(t[1:]) - np.array(t[:-1]) dt = dt.reshape(bundle_size, -1) timing_names = ['Load', 'Normals', 'Filter', 'Append'] s = '' for t_name in timing_names: s += '{:^10s} '.format(t_name) s += '\n' for b in range(bundle_size): for t_i in range(len(timing_names)): s += '{:^10.1f} '.format(1000 * dt[b, t_i]) s += '\n' print(s) t = t[:1] t += [time.time()] ################## # Apply bundle ICP ################## # for b in range(bundle_size): # w_names = ['w{:d}'.format(i) for i in range(frame_w[b].shape[1])] # write_ply('bb_init_{:02d}.ply'.format(b), # [frame_pts[b], frame_w[b]], # ['x', 'y', 'z'] + w_names) bundle_H, bundle_rms, all_H = bundle_pt2pl_icp(frame_pts, frame_norms, frame_w, n_samples=1000, max_pairing_dist=0.2, max_iter=200, avg_steps=5) # TODO: Lidar scan cleanup. AFTER THE MAPPING # > In polar coordinate: retrieve each line of scan. like 1D grid subs, index in a 1D grid, adjust grid # by min max and nb of scan lines. r = log(r) pour la suite # > In each line, order points by phi. find jumps in r direction. 
    #     get dr0 = r(j)-(j-1) and dr1 = r(j+1)-r(j)
    #     IF dr0 = dr1 THEN we probably are on a plane keep the point.
    #     IF abs(dr0-dr1) > Thresh THEN outlier, remove the point
    #
    #
    # TODO: Mapping
    #  > Start with a frame to frame bundle adjustement (do 20 frames, between 5 and 10 meters))
    #  > Create map from these 20 frames (USe our smart spherical grid subs)
    #  > ICP on the map
    #  > Choose, update map or compute it again from 20 new frames (Better to update if possible)

    # TODO: motion distortion, use phi angle to get points timestamps, remembre frame stime stamp is the one of the
    #  last points => unperiodicize phi, create linear interp function with last points and their angle Interpolate
    #  pose based on points angle


def get_odometry(sensor_path, day, t0, t1):
    """Load 100Hz wheel-odometry poses for one day, restricted to a time window.

    Parameters
    ----------
    sensor_path : str
        Root folder of the per-day sensor data.
    day : str
        Day subfolder name containing ``odometry_mu_100hz.csv``.
    t0, t1 : float
        Exclusive time bounds: only rows with t0 < timestamp < t1 are kept.

    Returns
    -------
    (t, H) : timestamps of the kept rows and the corresponding 4x4 homogeneous
        pose matrices produced by ``ssc_to_homo`` (ssc angles are in radians here).
    """
    # csv layout: column 0 = timestamp, columns 1.. = ssc pose parameters
    odom_name = join(sensor_path, day, 'odometry_mu_100hz.csv')
    odom = np.loadtxt(odom_name, delimiter=",", dtype=np.float64)

    # Keep only rows strictly inside the requested window
    mask = np.logical_and(odom[:, 0] > t0, odom[:, 0] < t1)
    ssc = odom[mask, 1:]
    t = odom[mask, 0]

    H = ssc_to_homo(ssc, ssc_in_radians=True)

    return t, H
89,471
32,282
#!/usr/bin/env python

__author__ = "Mari Wahl"
__copyright__ = "Copyright 2014, The Cogent Project"
__credits__ = ["Mari Wahl"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "Mari Wahl"
__email__ = "marina.w4hl@gmail.com"

import os

from constants import SUBFOLDERS, FEATURES


def create_input_files(subfolder):
    """Return the path of the processed vector file for *subfolder*."""
    return '../../output/vectors_proc/' + subfolder + '.data'


def create_output_files():
    """Create the output folder hierarchy and return the merged-data file path.

    Idempotent: existing directories are kept (exist_ok avoids the racy
    exists()-then-makedirs() pattern of the original).
    """
    out = '../../output/'
    os.makedirs(out, exist_ok=True)
    out_v = out + 'vectors_together/'
    os.makedirs(out_v, exist_ok=True)
    return out_v + 'together.data'


if __name__ == '__main__':
    output_file = create_output_files()

    # Concatenate every per-subfolder vector file into the single output file.
    # Open the output once in append mode instead of re-opening per iteration,
    # and use context managers so handles are closed even on error.
    # (Was Python 2 print statements and manual open()/close() pairs.)
    with open(output_file, 'a') as outfile:
        for subfolder in SUBFOLDERS:
            input_file = create_input_files(subfolder)
            print('Processing ' + input_file + ' ...')
            with open(input_file, 'r') as tempfile:
                outfile.write(tempfile.read())

    print('\nDone!!!')
1,128
410
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""

import mock
import pytest

from google.cloud import monitoring_v3
from google.cloud.monitoring_v3.proto import service_pb2
from google.cloud.monitoring_v3.proto import service_service_pb2
from google.protobuf import empty_pb2  # NOTE(review): not referenced below; kept for generated-test parity


class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface."""

    def __init__(self, method, channel_stub):
        # Remember which method this callable represents and the channel
        # whose request log / response queue it shares.
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        # Record the call so tests can assert on the request that was sent.
        self.channel_stub.requests.append((self.method, request))

        response = None
        if self.channel_stub.responses:
            response = self.channel_stub.responses.pop()

        # A queued Exception instance simulates an RPC failure.
        if isinstance(response, Exception):
            raise response

        if response:
            return response


class ChannelStub(object):
    """Stub for the grpc.Channel interface."""

    # NOTE(review): mutable default argument — the same list object is shared
    # by every ChannelStub() created without an explicit `responses`.
    def __init__(self, responses=[]):
        self.responses = responses
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        return MultiCallableStub(method, self)


class CustomException(Exception):
    pass


class TestServiceMonitoringServiceClient(object):
    """Unit tests for monitoring_v3.ServiceMonitoringServiceClient.

    Each test patches grpc channel creation to route RPCs through
    ChannelStub, then asserts on the request/response round trip.
    """

    def test_create_service(self):
        # Setup Expected Response
        name = "name3373707"
        display_name = "displayName1615086568"
        expected_response = {"name": name, "display_name": display_name}
        expected_response = service_pb2.Service(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        parent = client.project_path("[PROJECT]")
        service = {}

        response = client.create_service(parent, service)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.CreateServiceRequest(
            parent=parent, service=service
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_create_service_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        parent = client.project_path("[PROJECT]")
        service = {}

        with pytest.raises(CustomException):
            client.create_service(parent, service)

    def test_get_service(self):
        # Setup Expected Response
        name_2 = "name2-1052831874"
        display_name = "displayName1615086568"
        expected_response = {"name": name_2, "display_name": display_name}
        expected_response = service_pb2.Service(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        name = client.service_path("[PROJECT]", "[SERVICE]")

        response = client.get_service(name)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.GetServiceRequest(name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_get_service_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        name = client.service_path("[PROJECT]", "[SERVICE]")

        with pytest.raises(CustomException):
            client.get_service(name)

    def test_list_services(self):
        # Setup Expected Response
        next_page_token = ""
        services_element = {}
        services = [services_element]
        expected_response = {"next_page_token": next_page_token, "services": services}
        expected_response = service_service_pb2.ListServicesResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        parent = client.project_path("[PROJECT]")

        paged_list_response = client.list_services(parent)
        resources = list(paged_list_response)
        assert len(resources) == 1

        assert expected_response.services[0] == resources[0]

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.ListServicesRequest(parent=parent)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_list_services_exception(self):
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        parent = client.project_path("[PROJECT]")

        paged_list_response = client.list_services(parent)
        with pytest.raises(CustomException):
            list(paged_list_response)

    def test_update_service(self):
        # Setup Expected Response
        name = "name3373707"
        display_name = "displayName1615086568"
        expected_response = {"name": name, "display_name": display_name}
        expected_response = service_pb2.Service(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        service = {}

        response = client.update_service(service)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.UpdateServiceRequest(service=service)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_update_service_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        service = {}

        with pytest.raises(CustomException):
            client.update_service(service)

    def test_delete_service(self):
        channel = ChannelStub()
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        name = client.service_path("[PROJECT]", "[SERVICE]")

        client.delete_service(name)

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.DeleteServiceRequest(name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_delete_service_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        name = client.service_path("[PROJECT]", "[SERVICE]")

        with pytest.raises(CustomException):
            client.delete_service(name)

    def test_create_service_level_objective(self):
        # Setup Expected Response
        name = "name3373707"
        display_name = "displayName1615086568"
        goal = 317825.0
        expected_response = {"name": name, "display_name": display_name, "goal": goal}
        expected_response = service_pb2.ServiceLevelObjective(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        parent = client.service_path("[PROJECT]", "[SERVICE]")
        service_level_objective = {}

        response = client.create_service_level_objective(
            parent, service_level_objective
        )
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.CreateServiceLevelObjectiveRequest(
            parent=parent, service_level_objective=service_level_objective
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_create_service_level_objective_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        parent = client.service_path("[PROJECT]", "[SERVICE]")
        service_level_objective = {}

        with pytest.raises(CustomException):
            client.create_service_level_objective(parent, service_level_objective)

    def test_get_service_level_objective(self):
        # Setup Expected Response
        name_2 = "name2-1052831874"
        display_name = "displayName1615086568"
        goal = 317825.0
        expected_response = {"name": name_2, "display_name": display_name, "goal": goal}
        expected_response = service_pb2.ServiceLevelObjective(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        name = client.service_level_objective_path(
            "[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
        )

        response = client.get_service_level_objective(name)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.GetServiceLevelObjectiveRequest(
            name=name
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_get_service_level_objective_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        name = client.service_level_objective_path(
            "[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
        )

        with pytest.raises(CustomException):
            client.get_service_level_objective(name)

    def test_list_service_level_objectives(self):
        # Setup Expected Response
        next_page_token = ""
        service_level_objectives_element = {}
        service_level_objectives = [service_level_objectives_element]
        expected_response = {
            "next_page_token": next_page_token,
            "service_level_objectives": service_level_objectives,
        }
        expected_response = service_service_pb2.ListServiceLevelObjectivesResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        parent = client.service_path("[PROJECT]", "[SERVICE]")

        paged_list_response = client.list_service_level_objectives(parent)
        resources = list(paged_list_response)
        assert len(resources) == 1

        assert expected_response.service_level_objectives[0] == resources[0]

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.ListServiceLevelObjectivesRequest(
            parent=parent
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_list_service_level_objectives_exception(self):
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        parent = client.service_path("[PROJECT]", "[SERVICE]")

        paged_list_response = client.list_service_level_objectives(parent)
        with pytest.raises(CustomException):
            list(paged_list_response)

    def test_update_service_level_objective(self):
        # Setup Expected Response
        name = "name3373707"
        display_name = "displayName1615086568"
        goal = 317825.0
        expected_response = {"name": name, "display_name": display_name, "goal": goal}
        expected_response = service_pb2.ServiceLevelObjective(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        service_level_objective = {}

        response = client.update_service_level_objective(service_level_objective)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.UpdateServiceLevelObjectiveRequest(
            service_level_objective=service_level_objective
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_update_service_level_objective_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        service_level_objective = {}

        with pytest.raises(CustomException):
            client.update_service_level_objective(service_level_objective)

    def test_delete_service_level_objective(self):
        channel = ChannelStub()
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup Request
        name = client.service_level_objective_path(
            "[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
        )

        client.delete_service_level_objective(name)

        assert len(channel.requests) == 1
        expected_request = service_service_pb2.DeleteServiceLevelObjectiveRequest(
            name=name
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_delete_service_level_objective_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = monitoring_v3.ServiceMonitoringServiceClient()

        # Setup request
        name = client.service_level_objective_path(
            "[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
        )

        with pytest.raises(CustomException):
            client.delete_service_level_objective(name)
18,361
5,082
from __future__ import absolute_import, division, print_function, unicode_literals

import sqlite3


def fetchall(database, query, kwds):
    """Open *database*, run *query* with parameters *kwds*, return all rows.

    The connection is always closed, whether or not the query succeeds.
    """
    connection = sqlite3.connect(database)
    try:
        return connection.execute(query, kwds).fetchall()
    finally:
        connection.close()
294
87
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Common Python library imports
import re
import unicodedata

# Pip package imports
from flask_sqlalchemy.model import camel_to_snake_case
from flask import current_app
from itsdangerous import URLSafeSerializer, BadData
from loguru import logger

# Internal package imports
from .decorators import was_decorated_without_parenthesis, wrap_decorator
#from .mail import send_mail, prepare_mail, send_mail_sync


def _serializer():
    """Build the URL-safe serializer from the app's secret key and salt."""
    return URLSafeSerializer(current_app.secret_key,
                             salt=current_app.config['SECURITY_PASSWORD_SALT'])


def slugify(string):
    """Normalize *string* into a lowercase, hyphen-separated slug."""
    normalized = unicodedata.normalize('NFKD', string.strip())
    stripped = re.sub(r'[^\w\s-]', '', normalized)
    return re.sub(r'[-\s]+', '-', stripped).lower()


def title_case(string):
    """Turn a camelCase identifier into a spaced Title Case phrase."""
    snake = camel_to_snake_case(string)
    return snake.replace('_', ' ').title()


def pluralize(name):
    """Return the English plural of *name* (handles -y and -s endings)."""
    if name.endswith('y'):
        # right-replace the trailing 'y' with 'ies'
        return 'ies'.join(name.rsplit('y', 1))
    if name.endswith('s'):
        return f'{name}es'
    return f'{name}s'


def string_to_bool(s):
    """Map a yes/no style string to a bool; None when unrecognized."""
    if not isinstance(s, str):
        return None
    lowered = s.lower()
    if lowered in ('true', 'yes', 'y', '1', 'ye', 't'):
        return True
    if lowered in ('false', 'no', 'n', '0', 'f'):
        return False
    return None


def listify(obj):
    """Wrap *obj* in a list unless it is already a list or tuple."""
    return obj if isinstance(obj, (tuple, list)) else [obj]


def decode_token(token):
    """Decode *token* and return the embedded data, or None when invalid."""
    try:
        return _serializer().loads(token)
    except BadData as e:
        logger.error(e)
        return None


def encode_token(data):
    """Sign *data* and return the encoded token."""
    return _serializer().dumps(data)
1,775
603
from typing import List

import torch
from detectron2.structures import ImageList, Boxes, Instances, pairwise_iou
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads

from .utils import get_aligned_pooler, label_and_sample_proposals
from .lazy_fast_rcnn import LazyFastRCNNOutputLayers


@ROI_HEADS_REGISTRY.register()
class LazyRoIHeads(StandardROIHeads):
    """StandardROIHeads variant that uses LazyFastRCNNOutputLayers and the
    shared label_and_sample_proposals helper from .utils."""

    @torch.no_grad()
    def label_and_sample_proposals(
        self, proposals: List[Instances], targets: List[Instances]
    ) -> List[Instances]:
        # Delegate to the module-level helper instead of the base-class logic.
        return label_and_sample_proposals(self, proposals, targets)

    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        """Build the box head as the parent does, then swap in the lazy
        predictor and an RPN-aligned pooler over the RPN input features."""
        ret = super()._init_box_head(cfg, input_shape)
        ret["box_predictor"] = LazyFastRCNNOutputLayers(
            cfg,
            ret["box_head"].output_shape,
            # The loss weight is set as Cascade RPN loss
            loss_weight={
                "loss_cls": 1.5,
                "loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT
            },
        )
        # Pool from the same feature maps the RPN consumes.
        ret["box_in_features"] = cfg.MODEL.RPN.IN_FEATURES
        ret["box_pooler"] = get_aligned_pooler(
            cfg.MODEL.RPN,
            input_shape,
            output_size=cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,
            sampling_ratio=cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO,
        )
        return ret


@ROI_HEADS_REGISTRY.register()
class LazyCascadeRoIHeads(CascadeROIHeads):
    """CascadeROIHeads variant: one LazyFastRCNNOutputLayers per cascade
    stage, each with its own box2box regression weights."""

    @torch.no_grad()
    def label_and_sample_proposals(
        self, proposals: List[Instances], targets: List[Instances]
    ) -> List[Instances]:
        # Delegate to the module-level helper instead of the base-class logic.
        return label_and_sample_proposals(self, proposals, targets)

    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        """Build the cascade box heads, then replace every stage's predictor
        with a lazy predictor parameterized by that stage's reg weights."""
        ret = super()._init_box_head(cfg, input_shape)
        cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
        box_predictors = []
        for bbox_reg_weights in cascade_bbox_reg_weights:
            box_predictors.append(
                LazyFastRCNNOutputLayers(
                    cfg,
                    # All stages share the first head's output shape.
                    ret["box_heads"][0].output_shape,
                    box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),
                    loss_weight={
                        "loss_cls": 1.5,
                        "loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT
                    },
                )
            )
        ret["box_predictors"] = box_predictors
        # Pool from the same feature maps the RPN consumes.
        ret["box_in_features"] = cfg.MODEL.RPN.IN_FEATURES
        ret["box_pooler"] = get_aligned_pooler(
            cfg.MODEL.RPN,
            input_shape,
            output_size=cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,
            sampling_ratio=cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO,
        )
        return ret

    def _match_and_label_boxes(self, proposals, stage, targets):
        # Per-stage matching also routed through the shared helper.
        return label_and_sample_proposals(self, proposals, targets, False, False, stage)
3,028
1,073
# coding=utf-8
# tensorflow tf.contrib.data api test
import tensorflow as tf

# file path
filename = ''
batch_size = 100

# Pipeline: read TSV lines, parse three string columns, shuffle, batch.
aa = (tf.contrib.data.TextLineDataset(filename)
      .map(lambda line: tf.decode_csv(line,
                                      record_defaults=[['1'], ['1'], ['1']],
                                      field_delim='\t'))
      .shuffle(buffer_size=1000)
      # fix: Dataset has no .batch_size() method; batching is done with .batch()
      .batch(batch_size)
      )
350
133
''' 实验名称:音频播放 版本:v1.0 日期:2020.12 作者:01Studio 说明:MP3/WAV音频文件播放。使用物理按键控制 ''' #导入相关模块 import audio,time from pyb import Switch from machine import Pin #构建音频对象 wm=audio.WM8978() vol = 80 #音量初始化,80 ###################### # 播放 USR按键 ###################### play_flag = 0 def music_play(): global play_flag play_flag = 1 sw =Switch() sw.callback(music_play) ###################### # 音量加 A0按键 ###################### VOL_U = Pin('A0',Pin.IN,Pin.PULL_UP) #构建按键A0 vol_up_flag = 0 def vol_up(VOL_U): global vol #消除按键抖动 if VOL_U.value() == 0: time.sleep_ms(10) if VOL_U.value() == 0: vol=vol+10 if vol > 100: vol = 100 wm.volume(vol) VOL_U.irq(vol_up,Pin.IRQ_FALLING, hard=1) #定义中断,下降沿触发 ###################### # 音量减 E3按键 ###################### VOL_D = Pin('E3',Pin.IN,Pin.PULL_UP) #构建按键A0 vol_down_flag = 0 def vol_down(VOL_D): global vol #消除按键抖动 if VOL_D.value() == 0: time.sleep_ms(10) if VOL_D.value() == 0: vol=vol-10 if vol < 10: vol = 10 wm.volume(vol) VOL_D.irq(vol_down,Pin.IRQ_FALLING, hard=1) #定义中断,下降沿触发 #加载音乐 wm.load('/flash/music/Seasons In The Sun.mp3') while True: #播放音乐 if play_flag == 1: wm.play() play_flag = 0
1,333
706
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry

# Running extrema of the observed twist, updated by odom_cb.
lin_min = 0.0
lin_max = 0.0
ang_min = 0.0
ang_max = 0.0


def odom_cb(msg):
    """Track the min/max linear.x and angular.z seen so far and log them."""
    global lin_min, lin_max, ang_min, ang_max
    lin = msg.twist.twist.linear.x
    ang = msg.twist.twist.angular.z
    lin_min = min(lin_min, lin)
    lin_max = max(lin_max, lin)
    ang_min = min(ang_min, ang)
    ang_max = max(ang_max, ang)
    rospy.loginfo('linear: [%f, %f] angular: [%f, %f]',
                  lin_min, lin_max, ang_min, ang_max)


def main():
    """Subscribe to odometry and report velocity extrema until shutdown."""
    rospy.init_node('min_max_finder', anonymous=True)
    rospy.Subscriber('odom_comb', Odometry, odom_cb)
    # fix: log message previously said 'min_max_finde' (typo)
    rospy.loginfo('min_max_finder node ready and listening. now use teleop to move your robot to the limits!')
    rospy.spin()


if __name__ == '__main__':
    main()
942
396
# this file is to store all custom classes
import tkinter as tk


# class to store tkinter window properties
# font: tk font dictionary {family, size, weight, slant, underline, overstrike}
# font color: string
# nrows: the number of rows of lyric displayed (integer greater than 0)
# width: window width (int greater than 0)
# transparency: window transparency level (0.2 to 1)
# bg_color: [transparency: 1 or 2, background color (string), transparent color (string)]
# bd: border width in integers
# on_top: whether window is kept on top of all other windows (Boolean)
# x_pos, y_pos: window's x and y coordinates in pixels
class WindowProperties:
    def __init__(self, font, font_color, font_color_bg, nrows, width, transparency,
                 bg_color, bd, on_top, x_pos, y_pos):
        self.font = font
        self.font_color = font_color
        self.font_color_bg = font_color_bg
        self.nrows = nrows
        self.width = width
        self.transparency = transparency
        self.bg_color = bg_color
        self.bd = bd
        self.on_top = on_top
        self.x_pos = x_pos
        self.y_pos = y_pos

    def save(self, file_path):
        """Write every property to *file_path*, one value per line, in a fixed
        order that the loading code is expected to mirror."""
        with open(file_path, 'w') as f:
            f.write(str(self.font['family']) + '\n')
            f.write(str(self.font['size']) + '\n')
            f.write(str(self.font['weight']) + '\n')
            f.write(str(self.font['slant']) + '\n')
            f.write(str(self.font['underline']) + '\n')
            f.write(str(self.font['overstrike']) + '\n')
            f.write(str(self.font_color) + '\n')
            f.write(str(self.font_color_bg) + '\n')
            f.write(str(self.nrows) + '\n')
            f.write(str(self.width) + '\n')
            f.write(str(self.transparency) + '\n')
            f.write(str(self.bg_color) + '\n')
            f.write(str(self.bd) + '\n')
            f.write(str(self.on_top) + '\n')
            f.write(str(self.x_pos) + '\n')
            f.write(str(self.y_pos) + '\n')


# helper function for title bar to save setting and then close window
def close_root(root, win_properties):
    win_properties.save('cache/user_setting.txt')
    root.destroy()


# custom title bar class
class TitleBar:
    # initialization takes x starting position, y starting position, and window
    def __init__(self, last_click_x, last_click_y, root, win_properties):
        """Build the frame, close button and title label, and wire up the
        drag/close mouse bindings."""
        # initialize title_bar, close button, and label
        self.title_bar = tk.Frame(root, bg='#2e2e2e', relief='groove', bd=0,
                                  highlightthickness=0)
        self.close_button = tk.Button(self.title_bar, text='×', bg="#2e2e2e", padx=5,
                                      activebackground='red', bd=0, font="bold",
                                      fg='white',
                                      command=lambda: close_root(root, win_properties))
        self.close_button.grid(row=0, column=1, sticky='E')
        self.title_text = tk.Label(self.title_bar, text='', bg='#2e2e2e', padx=5,
                                   fg='white')
        self.title_text.grid(row=0, column=0, sticky='W')
        self.title_bar.grid_columnconfigure(0, weight=1)

        # bind closing and drag
        self.last_click_x = last_click_x
        self.last_click_y = last_click_y
        self.title_bar.bind('<Button-1>', self.save_last_click)
        self.title_bar.bind('<B1-Motion>',
                            lambda event: self.drag(event, root, win_properties))
        self.title_text.bind('<Button-1>', self.save_last_click)
        self.title_text.bind('<B1-Motion>',
                             lambda event: self.drag(event, root, win_properties))

    # update title function
    def title(self, title_text):
        self.title_text.config(text=title_text)

    # update last position to help with drag function
    def save_last_click(self, event):
        self.last_click_x = event.x
        self.last_click_y = event.y

    # drag function: move the window with the mouse and remember the new position
    def drag(self, event, root, win_properties):
        x, y = event.x - self.last_click_x + root.winfo_x(), \
               event.y - self.last_click_y + root.winfo_y()
        root.geometry("+%s+%s" % (x, y))
        win_properties.x_pos = x
        win_properties.y_pos = y


# class to store song information
# song: Spotify current song data (see spotify_func.py for format)
# lyric: current lyric used (lrc file in string format)
# search_result: list of lyric results scraped from website ([[song, link, singer]..])
# lyric_offset: number of ms to offset lyrics by when displayed (integer)
# nlyric: the lyric currently being used from search_result (integer between 0 and len(search_result) - 1)
# dynamic lyric position: to track which letter should be highlighted a different color, integer
# dynamic lyric duration: to track how frequently the lyric update function should be refreshed, in ms
# lyric_original: original lyric (not formatted) to be saved to cache
class SongProperties:
    def __init__(self, songx, lyric_f, lyric_o, search_resultx, lyric_offsetx,
                 nlyricx):
        self.song = songx
        self.lyric = lyric_f
        self.search_result = search_resultx
        self.lyric_offset = lyric_offsetx
        self.nlyric = nlyricx
        self.dynamic_lyric_pos = 0
        self.dynamic_lyric_duration = 100
        self.lyric_original = lyric_o
5,076
1,668
""" Unit test utilities. """ import textwrap def clean_multiline_string( multiline_string, sep='\n' ): """ Dedent, split, remove first and last empty lines, rejoin. """ multiline_string = textwrap.dedent( multiline_string ) string_list = multiline_string.split( sep ) if not string_list[0]: string_list = string_list[1:] if not string_list[-1]: string_list = string_list[:-1] # return '\n'.join( docstrings ) return ''.join([ ( s + '\n' ) for s in string_list ]) __all__ = ( "clean_multiline_string", )
563
201
import asyncio
import json


def _frame(request):
    """Serialize a JSON-RPC request and terminate it with a newline."""
    return json.dumps(request).encode() + b"\n"


def test_chunked_messages(plugin, read):
    # A single frame delivered in two arbitrary pieces, then EOF.
    message = _frame({
        "jsonrpc": "2.0",
        "method": "install_game",
        "params": {"game_id": "3"}
    })
    read.side_effect = [message[:5], message[5:], b""]
    asyncio.run(plugin.run())
    plugin.install_game.assert_called_with(game_id="3")


def test_joined_messages(plugin, read):
    # Two complete frames arriving in one read, then EOF.
    install = {"jsonrpc": "2.0", "method": "install_game", "params": {"game_id": "3"}}
    launch = {"jsonrpc": "2.0", "method": "launch_game", "params": {"game_id": "3"}}
    read.side_effect = [_frame(install) + _frame(launch), b""]
    asyncio.run(plugin.run())
    plugin.install_game.assert_called_with(game_id="3")
    plugin.launch_game.assert_called_with(game_id="3")


def test_not_finished(plugin, read):
    # A frame without its terminating newline must not be dispatched.
    incomplete = json.dumps({
        "jsonrpc": "2.0",
        "method": "install_game",
        "params": {"game_id": "3"}
    }).encode()
    read.side_effect = [incomplete, b""]
    asyncio.run(plugin.run())
    plugin.install_game.assert_not_called()
1,387
476
import os
import unittest
from ....BaseTestCase import BaseTestCase
from kombi.Crawler import Crawler
from kombi.Crawler.Fs.Image import ImageCrawler
from kombi.Crawler.PathHolder import PathHolder


class ImageCrawlerTest(BaseTestCase):
    """Test Image crawler."""

    # Fixture paths: one stand-alone frame and one member of a numbered sequence.
    __singleFile = os.path.join(BaseTestCase.dataTestsDirectory(), "test.dpx")
    __sequenceFile = os.path.join(BaseTestCase.dataTestsDirectory(), "testSeq.0001.exr")

    def testSingleImage(self):
        """
        Test that the crawler created for a single image is based on the image crawler.
        """
        crawler = Crawler.create(PathHolder(self.__singleFile))
        self.assertIsInstance(crawler, ImageCrawler)

    def testSequenceImage(self):
        """
        Test that the crawler created for a sequence image is based on the image crawler.
        """
        crawler = Crawler.create(PathHolder(self.__sequenceFile))
        self.assertIsInstance(crawler, ImageCrawler)

    def testGroupTagSequence(self):
        """
        Test that the tag group has been assigned to the image sequence crawler.
        """
        crawler = Crawler.create(PathHolder(self.__sequenceFile))
        self.assertIn('group', crawler.tagNames())
        # frame number is generalized to a '####' padding placeholder
        self.assertEqual(crawler.tag('group'), "testSeq.####.exr")

    def testGroupSprintfTagSequence(self):
        """
        Test that the tag groupSprintf has been assigned to the image sequence crawler.
        """
        crawler = Crawler.create(PathHolder(self.__sequenceFile))
        self.assertIn('groupSprintf', crawler.tagNames())
        # same group expressed as a printf-style pattern
        self.assertEqual(crawler.tag('groupSprintf'), "testSeq.%04d.exr")

    def testGroupTagSingle(self):
        """
        Test that the tag group has not been assigned to a single image crawler.
        """
        crawler = Crawler.create(PathHolder(self.__singleFile))
        self.assertNotIn('group', crawler.tagNames())

    def testGroupSprintfTagSingle(self):
        """
        Test that the tag groupSprintf has not been assigned to a single image crawler.
        """
        crawler = Crawler.create(PathHolder(self.__singleFile))
        self.assertNotIn('groupSprintf', crawler.tagNames())

    def testIsSequence(self):
        """
        Test if a crawler is a sequence.
        """
        singleCrawler = Crawler.create(PathHolder(self.__singleFile))
        sequenceCrawler = Crawler.create(PathHolder(self.__sequenceFile))
        self.assertEqual(singleCrawler.isSequence(), False)
        self.assertEqual(singleCrawler.var("imageType"), "single")
        self.assertEqual(sequenceCrawler.isSequence(), True)
        self.assertEqual(sequenceCrawler.var("imageType"), "sequence")


if __name__ == "__main__":
    unittest.main()
2,731
796
#!/usr/bin/env python
import unittest, asyncio, asynctest, websockets, json
from remote_params import HttpServer, Params, Server, Remote, create_sync_params, schema_list
from remote_params.WebsocketServer import WebsocketServer


class MockSocket:
    """Minimal websocket stand-in that records close() calls and sent messages."""
    def __init__(self):
        self.close_count = 0
        self.msgs = []

    def close(self):
        self.close_count += 1

    async def send(self, msg):
        self.msgs.append(msg)


class TestWebsocketServer(asynctest.TestCase):
    def setUp(self):
        # One int param plus a non-started server; tests start it explicitly.
        self.params = params = Params()
        self.p1 = params.int('some_int')
        self.p1.set(0)
        self.wss = WebsocketServer(Server(self.params), start=False)

    def tearDown(self):
        self.wss.stop()

    def test_default_port(self):
        self.assertEqual(self.wss.port, 8081)

    async def test_connects_only_one_remote(self):
        # The server registers exactly one Remote regardless of client count.
        self.assertEqual(len(self.wss.server.connected_remotes), 0)
        await self.wss.start_async()
        self.assertEqual(len(self.wss.server.connected_remotes), 1)
        uri = f'ws://localhost:{self.wss.port}'
        async with websockets.connect(uri) as websocket:
            self.assertEqual(len(self.wss.server.connected_remotes), 1)
            async with websockets.connect(uri) as websocket:
                self.assertEqual(len(self.wss.server.connected_remotes), 1)
            self.assertEqual(len(self.wss.server.connected_remotes), 1)
        self.assertEqual(len(self.wss.server.connected_remotes), 1)
        self.wss.stop()
        self.assertEqual(len(self.wss.server.connected_remotes), 0)

    async def test_incoming_value(self):
        # POST messages only take effect while the server is running,
        # and only for known param paths.
        await self.wss._onMessage(f'POST /some_int?value={3}', None)
        self.assertEqual(self.p1.value, 0)  # server not started
        await self.wss.start_async()
        await self.wss._onMessage(f'POST /some_int?value={4}', None)
        self.assertEqual(self.p1.value, 4)  # param changed
        await self.wss._onMessage(f'POST /wrong_int?value={5}', None)
        self.assertEqual(self.p1.value, 4)  # wrong url
        self.wss.stop()
        await self.wss._onMessage(f'POST /wrong_int?value={6}', None)
        self.assertEqual(self.p1.value, 4)  # server stopped

    async def test_stop_message(self):
        mocksock = MockSocket()
        await self.wss._onMessage('stop', mocksock)
        self.assertEqual(mocksock.close_count, 1)

    async def test_responds_to_schema_request_with_schema_json(self):
        mocksocket = MockSocket()
        await self.wss._onMessage(f'GET schema.json', mocksocket)
        # verify responded with schema json
        self.assertEqual(mocksocket.msgs, [
            f'POST schema.json?schema={json.dumps(schema_list(self.params))}'
        ])

    async def test_broadcasts_value_changes(self):
        await self.wss.start_async()
        # connect client
        uri = f'ws://127.0.0.1:{self.wss.port}'
        async with websockets.connect(uri) as ws:
            # receive welcome message
            msg = await ws.recv()
            self.assertEqual(msg, 'welcome to pyRemoteParams websockets')
            # change parameter value
            self.p1.set(2)
            # receive parameter value change
            msg = await ws.recv()
            self.assertEqual(msg, 'POST /some_int?value=2')

    async def test_broadcasts_schema_change(self):
        await self.wss.start_async()
        # connect client
        uri = f'ws://127.0.0.1:{self.wss.port}'
        async with websockets.connect(uri) as ws:
            # receive welcome message
            msg = await ws.recv()
            self.assertEqual(msg, 'welcome to pyRemoteParams websockets')
            # change schema layout value
            self.params.string('name')
            # receive parameter value change
            msg = await ws.recv()
            self.assertEqual(msg, f'POST schema.json?schema={json.dumps(schema_list(self.params))}')


# run just the tests in this file
if __name__ == '__main__':
    unittest.main()
3,688
1,303
import warnings

import pandas as pd

warnings.filterwarnings("ignore")


def classify_comments(text_file, page_name):
    """Label the comments in ``text_file`` with VADER sentiment.

    Reads one comment per line (tab-separated), strips noise (numbers,
    quotes, e-mail addresses, URLs), scores each comment with NLTK's VADER
    analyzer (extended with a few domain-specific lexicon entries) and writes
    a two-column file (comments, labels) under ``./data/<page_name>/``.

    Args:
        text_file: path of the raw comments file.
        page_name: page identifier used to build the output directory.

    Returns:
        DataFrame with ``comments`` and ``labels`` columns, where a label is
        1 (positive), 0 (negative) or "N" (neutral).
    """
    # Imported lazily so importing this module does not require nltk.
    from nltk.sentiment.vader import SentimentIntensityAnalyzer
    # nltk.download("vader_lexicon")  # one-time download if the lexicon is missing

    df = pd.read_csv("%s" % text_file, names=["comments"], sep="\t")

    # Clean the data: numbers, quotes, e-mail addresses and URLs add noise
    # to the sentiment scores.  ``regex=`` is passed explicitly everywhere:
    # pandas >= 1.2 warns when the intent is ambiguous, and literal patterns
    # such as "*" are not valid regular expressions.
    df["comments"] = df["comments"].str.replace(r"^\d+\s|\s\d+\s|\s\d+$", " ", regex=True)
    df["comments"] = df["comments"].str.replace('"', "", regex=False)
    df["comments"] = df["comments"].str.replace("*", "", regex=False)
    # e-mail addresses (the original pattern kept JS-style "/.../" delimiters,
    # which made the slashes part of the pattern so it never matched)
    df["comments"] = df["comments"].str.replace(r"[^@\s]*@[^@\s]*\.[^@\s]*", "", regex=True)
    # URLs / bare domain names (original pattern was wrapped in '"/.../i"',
    # i.e. a quoted JS regex literal that could never match real text)
    df["comments"] = df["comments"].str.replace(
        r"[a-zA-Z]*[:\/\/]*[A-Za-z0-9\-_]+\.+[A-Za-z0-9\.\/%&=\?\-_]+", "", regex=True
    )
    df["comments"] = df["comments"].str.replace(
        r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))",
        "",
        regex=True,
    )
    df["comments"] = df["comments"].str.replace("https://", "", regex=False)
    df["comments"] = df["comments"].str.replace(r"\d+(\.\d+)?", "", regex=True)

    sid = SentimentIntensityAnalyzer()
    # Domain-specific corrections / additions to the default VADER lexicon.
    new_words = {
        "over": -0.5,
        "garbage": -2.0,
        "dumpster": -3.1,
        ":(": -1,
        "refuses": -1,
        "down": -1,
        "crashed": -2,
        "Amen": 1,
        "Available": 1,
        "#Save": 1,
        "always": 0.5,
    }
    sid.lexicon.update(new_words)

    # New columns for the positive / negative / neutral / compound scores.
    df["impactPers"] = df["comments"].apply(
        lambda comments: sid.polarity_scores(comments)
    )
    df["posPers"] = df["impactPers"].apply(lambda score_dict: score_dict["pos"])
    df["negPers"] = df["impactPers"].apply(lambda score_dict: score_dict["neg"])
    df["neuPers"] = df["impactPers"].apply(lambda score_dict: score_dict["neu"])
    df["comPers"] = df["impactPers"].apply(lambda score_dict: score_dict["compound"])

    def label_race(row):
        """Map a compound score to 1 (positive), 0 (negative) or "N" (neutral)."""
        if row["comPers"] >= 0.02:
            return 1
        elif row["comPers"] <= -0.02:
            return 0
        else:
            return "N"

    # Final label column derived from the compound score.
    df["labels"] = df.apply(label_race, axis=1)

    # Persist only the two columns downstream consumers need.
    new_df = df[["comments", "labels"]]
    create_dir(page_name)
    # create_dir() lower-cases the directory name, so the output path must be
    # lower-cased too (the original wrote to the non-lowered path, which does
    # not exist for any page name containing upper-case letters).
    new_df.to_csv("./data/%s/classified_comments.txt" % page_name.lower())
    return new_df


def create_dir(page_name):
    """Create ``./data/<page_name.lower()>`` if it does not exist yet.

    Args:
        page_name: page identifier; lower-cased for the directory name.

    Returns:
        True if the directory was created, False if it already existed.
    """
    import os

    dir_path = "./data/%s" % page_name.lower()
    if not os.path.isdir(dir_path):
        # makedirs also creates ./data when missing (os.mkdir raised
        # FileNotFoundError when the parent directory did not exist yet).
        os.makedirs(dir_path)
        return True
    return False
3,592
1,210
import pytest @pytest.mark.parametrize( "script", (None, "test.py"), ) def test_restart_app(mocked_android_modules, app_instance, test_py_script, script): from android_here import restart_app restart_app(script) def test_script_path_resolved(mocked_android_modules, app_instance, test_py_script): from android_here import resolve_script_path path = resolve_script_path("test.py") assert path.startswith("/") and path.endswith("test.py") def test_absolute_script_path_resolved(mocked_android_modules, app_instance, test_py_script): from android_here import resolve_script_path assert resolve_script_path(test_py_script) == test_py_script def test_pin_shortcut(mocker, mocked_android_modules, app_instance, test_py_script): from android_here import pin_shortcut pin_shortcut("test.py", "test label")
846
277
import sys sys.path.append('..') from Network.facenet.API import build_face_manager, detetion_and_recongnize ''' You have to build face manager before you start detection and recongnize. ''' face_manager = build_face_manager(r"face/dataset/path") result = detetion_and_recongnize(face_manager, r"images/path/you/want/to/detect") print(result)
346
120
class DispositivoEntrada: def __init__(self, marca, tipo_entrada): self._marca = marca self.tipo_entrada = tipo_entrada
149
51
from django.urls import path from .views import HomepageView urlpatterns = [ path('', HomepageView.as_view(), name='home'), ]
132
43
import copy
import logging
import warnings

from kolibri.plugins.registry import registered_plugins

logger = logging.getLogger(__name__)


def __validate_config_option(
    section, name, base_config_spec, plugin_specs, module_path
):
    """Validate a plugin-supplied option and record which plugin set it.

    Raises ValueError when a plugin tries to redefine a core option.  Warns
    when several plugins define the same option.  ``plugin_specs`` is mutated
    in place to track, per section/option, the list of plugin module paths
    that set it.
    """
    # Raise an error if someone tries to overwrite a base option
    # except for the default value.
    if section in base_config_spec:
        if name in base_config_spec[section]:
            raise ValueError("Cannot overwrite a core Kolibri options spec option")

    # Warn if a plugin tries to add an option that another plugin has already added
    if section in plugin_specs:
        if name in plugin_specs[section]:
            warnings.warn(
                "{plugin} set an option {option} in section {section} but {plugins} had already set it".format(
                    plugin=module_path,
                    plugins=", ".join(plugin_specs[section][name]),
                    option=name,
                    section=section,
                )
            )
            plugin_specs[section][name].append(module_path)
        else:
            # If not create the list for this option name
            # to track this and future modifications
            plugin_specs[section][name] = [module_path]
    else:
        # If not create the dict for the section
        # and the list for this option name
        plugin_specs[section] = {name: [module_path]}


def __process_config_spec(
    option_spec, base_config_spec, plugin_specs, module_path, final_spec
):
    """Merge one plugin's option_spec into ``final_spec`` after validation."""
    for section, opts in option_spec.items():
        for name, attrs in opts.items():
            __validate_config_option(
                section, name, base_config_spec, plugin_specs, module_path
            )
            if section not in final_spec:
                final_spec[section] = {}
            final_spec[section][name] = attrs


def __validate_option_default(section, name, plugin_default_overrides, module_path):
    """Record a plugin's default override, warning when plugins collide.

    Unlike __validate_config_option this never raises: overriding the default
    of a core option is allowed; only double-overrides are warned about.
    """
    # Warn if a plugin tries to add an option that another plugin has already added
    if section in plugin_default_overrides:
        if name in plugin_default_overrides[section]:
            warnings.warn(
                "{plugin} set an option default {option} in section {section} but {plugins} had already set it".format(
                    plugin=module_path,
                    plugins=", ".join(plugin_default_overrides[section][name]),
                    option=name,
                    section=section,
                )
            )
            plugin_default_overrides[section][name].append(module_path)
        else:
            # If not create the list for this option name
            # to track this and future modifications
            plugin_default_overrides[section][name] = [module_path]
    else:
        # If not create the dict for the section
        # and the list for this option name
        plugin_default_overrides[section] = {name: [module_path]}


def __process_option_defaults(
    option_defaults, base_config_spec, plugin_default_overrides, module_path, final_spec
):
    """Apply one plugin's default-value overrides to ``final_spec``.

    Overrides targeting unknown sections or options are logged and skipped
    rather than raising, so one bad plugin does not break startup.
    """
    for section, opts in option_defaults.items():
        for name, default in opts.items():
            __validate_option_default(
                section, name, plugin_default_overrides, module_path
            )
            if section not in final_spec:
                logger.error(
                    "Tried to set a new default in section {}, but this is not a valid section".format(
                        section
                    )
                )
                continue
            if name in final_spec[section]:
                # This is valid, so set a default
                # Note that we do not validation here for now,
                # so it is up to the user to ensure the default value
                # is kosher.
                final_spec[section][name]["default"] = default
            else:
                logger.error(
                    "Tried to set a new default in section {}, for option {} but this is not a valid option".format(
                        section, name
                    )
                )


def extend_config_spec(base_config_spec):
    """Return ``base_config_spec`` extended with every registered plugin's options.

    Two passes over the plugin registry: first merge new option specs, then
    apply default-value overrides — the second pass runs after the first so a
    plugin may override the defaults of options another plugin introduced.
    The input spec is deep-copied and never mutated.
    """
    plugin_specs = {}
    final_spec = copy.deepcopy(base_config_spec)
    # First process options config spec additions
    for plugin_instance in registered_plugins:
        plugin_options = plugin_instance.options_module
        if plugin_options and hasattr(plugin_options, "option_spec"):
            module_path = plugin_instance.module_path
            option_spec = plugin_options.option_spec
            __process_config_spec(
                option_spec, base_config_spec, plugin_specs, module_path, final_spec
            )
    # Now process default value overrides, do this second in order to allow plugins
    # to override default values for other plugins!
    plugin_default_overrides = {}
    for plugin_instance in registered_plugins:
        plugin_options = plugin_instance.option_defaults_module
        if plugin_options and hasattr(plugin_options, "option_defaults"):
            module_path = plugin_instance.module_path
            option_defaults = plugin_options.option_defaults
            __process_option_defaults(
                option_defaults,
                base_config_spec,
                plugin_default_overrides,
                module_path,
                final_spec,
            )
    return final_spec
5,394
1,346
from io import open
import time
import math
import torch
import torch.nn.functional as F
from config import MAX_LENGTH
from config import SOS_token
from config import EOS_token
from config import device


class Lang:
    """Vocabulary for one language: word<->index maps plus word counts.

    Indices 0 and 1 are reserved for the SOS/EOS sentence delimiters.
    """

    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}  # start/end-of-sentence markers
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        """Register every whitespace-separated word of ``sentence``."""
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        """Add ``word`` to the vocabulary, or bump its count if already known."""
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1


def readLangs(lang1, lang2, reverse=False):
    """Read the chat corpus and build (input_lang, output_lang, pairs).

    Each line of data/chatdata_all.txt holds one "question@@answer" pair.
    With ``reverse=True`` the direction of every pair is flipped and the
    Lang roles are swapped accordingly.
    """
    # Read the whole corpus into a list of lines; the context manager closes
    # the file deterministically (the original leaked the handle).
    with open('data/chatdata_all.txt', encoding='utf-8') as f:
        lines = f.read().strip().split('\n')

    # Split every line into a [question, answer] pair.
    # (A debug print of the full corpus was removed here.)
    pairs = [[s for s in l.split('@@')] for l in lines]

    # Reverse pairs if requested, swapping the Lang roles to match.
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)

    return input_lang, output_lang, pairs


def filterPair(p):
    """Keep only pairs whose both sides are shorter than MAX_LENGTH words."""
    return len(p[0].split(' ')) < MAX_LENGTH and \
        len(p[1].split(' ')) < MAX_LENGTH


def filterPairs(pairs):
    """Apply filterPair to every pair."""
    return [pair for pair in pairs if filterPair(pair)]


def prepareData(lang1, lang2, reverse=False):
    """Load, length-filter and index the corpus; return the Langs and pairs."""
    input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
    print("Read %s sentence pairs" % len(pairs))
    pairs = filterPairs(pairs)
    print("Trimmed to %s sentence pairs" % len(pairs))
    print("Counting words...")
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs


# Sentence -> list of word indices
def indexesFromSentence(lang, sentence):
    return [lang.word2index[word] for word in sentence.split(' ')]


# Sentence -> (len+1, 1) LongTensor terminated by EOS
def tensorFromSentence(lang, sentence):
    indexes = indexesFromSentence(lang, sentence)
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)


# (question, answer) pair -> pair of index tensors
def tensorsFromPair(pair, input_lang, output_lang):
    input_tensor = tensorFromSentence(input_lang, pair[0])
    target_tensor = tensorFromSentence(output_lang, pair[1])
    return (input_tensor, target_tensor)


def asMinutes(s):
    """Format ``s`` seconds as 'Xm Ys'."""
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


def timeSince(since, percent):
    """Return 'elapsed (- estimated remaining)' given progress ``percent``."""
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
2,909
1,032
import asyncio import json import os from datetime import datetime from src.common import log_utils, pushshift from src.common.filesystem import S3FileSystem, StubFileSystem from src.common.lambda_context import local_lambda_invocation def handler(event, context): logger = log_utils.get_logger("archive-comments-lambda") submission_id = event["Records"][0]["Sns"]["Message"] if context is local_lambda_invocation: filesystem = StubFileSystem() else: filesystem = S3FileSystem(os.environ.get("ARCHIVE_DATA_BUCKET")) return asyncio.get_event_loop().run_until_complete(handle(submission_id, filesystem, logger)) async def handle(submission_id, filesystem, logger): logger.info(f"Archiving comments for {submission_id}...") comment_ids = await get_comment_ids(submission_id) comments = await get_comments(comment_ids) await filesystem.mkdir(submission_id) data = json.dumps(comments, ensure_ascii=True, indent=4) await filesystem.write(f"{submission_id}/comments.json", data) return { "statusCode": 200, "body": json.dumps({ "submission_id": submission_id, "last_updated": datetime.utcnow().timestamp() }) } async def get_comment_ids(submission_id): comment_ids = await pushshift.request(f"submission/comment_ids/{submission_id}") return comment_ids async def get_comments(comment_ids): if comment_ids is None: return [] chunk_size = 256 id_chunks = [comment_ids[x: x + chunk_size] for x in range(0, len(comment_ids), chunk_size)] comments = [] for id_chunk in id_chunks: ids = ",".join(id_chunk) chunk = await pushshift.request("search/comment", ids=ids) comments.extend(chunk) return comments if __name__ == "__main__": with open("event.json", "r") as file: event = json.load(file) handler(event, local_lambda_invocation)
1,941
618
"""Quantize audio files into discrete cluster ids using a CPC clustering module."""
import os
import sys
import json
import argparse
import progressbar
from pathlib import Path
from random import shuffle
from time import time
import torch
from cpc.dataset import findAllSeqs
from cpc.feature_loader import buildFeature, FeatureModule, loadModel, buildFeature_batch
from cpc.criterion.clustering import kMeanCluster
#from cpc.criterion.research.clustering import kMeanCluster


def readArgs(pathArgs):
    """Load a JSON args file and return it as an argparse.Namespace."""
    print(f"Loading args from {pathArgs}")
    with open(pathArgs, 'r') as file:
        args = argparse.Namespace(**json.load(file))

    return args


def loadClusterModule(pathCheckpoint, norm_vec_len=False):
    """Load a clustering checkpoint onto the GPU.

    A checkpoint containing a "state_dict" key is a k-means model; otherwise
    the checkpoint's "mu" tensor is used directly (dp-means).
    """
    print(f"Loading ClusterModule at {pathCheckpoint}")
    state_dict = torch.load(pathCheckpoint)
    if "state_dict" in state_dict: #kmeans
        clusterModule = kMeanCluster(torch.zeros(1, state_dict["n_clusters"], state_dict["dim"]), norm_vec_len)
        clusterModule.load_state_dict(state_dict["state_dict"])
    else: #dpmeans
        clusterModule = kMeanCluster(state_dict["mu"])
    clusterModule = clusterModule.cuda()
    return clusterModule


def parseArgs(argv):
    """Parse the command-line arguments for the quantization run."""
    # Run parameters
    parser = argparse.ArgumentParser(description='Quantize audio files using CPC Clustering Module.')
    parser.add_argument('pathCheckpoint', type=str,
                        help='Path to the clustering checkpoint.')
    parser.add_argument('pathDB', type=str,
                        help='Path to the dataset that we want to quantize.')
    parser.add_argument('pathOutput', type=str,
                        help='Path to the output directory.')
    parser.add_argument('--pathSeq', type=str,
                        help='Path to the sequences (file names) to be included used.')
    parser.add_argument('--split', type=str, default=None,
                        help="If you want to divide the dataset in small splits, specify it "
                             "with idxSplit-numSplits (idxSplit > 0), eg. --split 1-20.")
    parser.add_argument('--file_extension', type=str, default=".flac",
                        help="Extension of the audio files in the dataset (default: .flac).")
    parser.add_argument('--max_size_seq', type=int, default=10240,
                        help='Maximal number of frames to consider '
                             'when computing a batch of features (defaut: 10240).')
    parser.add_argument('--batch_size', type=int, default=8,
                        help='Batch size used to compute features '
                             'when computing each file (defaut: 8).')
    # NOTE(review): type=bool is an argparse pitfall — bool('False') is True,
    # so any explicit value on the command line enables strict mode.
    parser.add_argument('--strict', type=bool, default=True,
                        help='If activated, each batch of feature '
                             'will contain exactly max_size_seq frames (defaut: True).')
    parser.add_argument('--debug', action='store_true',
                        help="Load only a very small amount of files for "
                             "debugging purposes.")
    parser.add_argument('--nobatch', action='store_true',
                        help="Don't use batch implementation of when building features."
                             "NOTE: This can have better quantized units as we can set "
                             "model.gAR.keepHidden = True (line 162), but the quantization"
                             "will be a bit longer.")
    # NOTE(review): the two adjacent literals below lack a separating space,
    # so the help text renders "...helpfulwhen...". Kept as-is (runtime string).
    parser.add_argument('--recursionLevel', type=int, default=1,
                        help='Speaker level in pathDB (defaut: 1). This is only helpful'
                             'when --separate-speaker is activated.')
    parser.add_argument('--separate-speaker', action='store_true',
                        help="Separate each speaker with a different output file.")
    parser.add_argument('--norm_vec_len', action='store_true',
                        help="Normalize vector lengths.")
    return parser.parse_args(argv)


def main(argv):
    """Run the full pipeline: list files, load models, quantize, save outputs."""
    # Args parser
    args = parseArgs(argv)

    print("=============================================================")
    print(f"Quantizing data from {args.pathDB}")
    print("=============================================================")

    # Check if directory exists
    if not os.path.exists(args.pathOutput):
        print("")
        print(f"Creating the output directory at {args.pathOutput}")
        Path(args.pathOutput).mkdir(parents=True, exist_ok=True)

    # Get splits
    if args.split:
        assert len(args.split.split("-"))==2 and int(args.split.split("-")[1]) >= int(args.split.split("-")[0]) >= 1, \
            "SPLIT must be under the form idxSplit-numSplits (numSplits >= idxSplit >= 1), eg. --split 1-20"
        idx_split, num_splits = args.split.split("-")
        idx_split = int(idx_split)
        num_splits = int(num_splits)

    # Find all sequences
    print("")
    print(f"Looking for all {args.file_extension} files in {args.pathDB} with speakerLevel {args.recursionLevel}")
    seqNames, speakers = findAllSeqs(args.pathDB,
                                     speaker_level=args.recursionLevel,
                                     extension=args.file_extension,
                                     loadCache=True)

    # Optionally keep only the file names listed in --pathSeq.
    if args.pathSeq:
        with open(args.pathSeq, 'r') as f:
            seqs = set([x.strip() for x in f])

        filtered = []
        for s in seqNames:
            if s[1].split('/')[-1].split('.')[0] in seqs:
                filtered.append(s)
        seqNames = filtered

    print(f"Done! Found {len(seqNames)} files and {len(speakers)} speakers!")
    # Group sequences per speaker when writing one output file per speaker.
    if args.separate_speaker:
        seqNames_by_speaker = {}
        for seq in seqNames:
            speaker = seq[1].split("/")[args.recursionLevel-1]
            if speaker not in seqNames_by_speaker:
                seqNames_by_speaker[speaker] = []
            seqNames_by_speaker[speaker].append(seq)

    # Check if output file exists
    if not args.split:
        nameOutput = "quantized_outputs.txt"
    else:
        nameOutput = f"quantized_outputs_split_{idx_split}-{num_splits}.txt"
    if args.separate_speaker is False:
        outputFile = os.path.join(args.pathOutput, nameOutput)
        assert not os.path.exists(outputFile), \
            f"Output file {outputFile} already exists !!!"

    # Get splits: keep only this worker's contiguous slice of the file list.
    if args.split:
        startIdx = len(seqNames) // num_splits * (idx_split-1)
        if idx_split == num_splits:
            endIdx = len(seqNames)
        else:
            endIdx = min(len(seqNames) // num_splits * idx_split, len(seqNames))
        seqNames = seqNames[startIdx:endIdx]
        print("")
        print(f"Quantizing split {idx_split} out of {num_splits} splits, with {len(seqNames)} files (idx in range({startIdx}, {endIdx})).")

    # Debug mode
    if args.debug:
        nsamples=20
        print("")
        print(f"Debug mode activated, only load {nsamples} samples!")
        # shuffle(seqNames)
        seqNames = seqNames[:nsamples]

    # Load Clustering args: look next to the checkpoint for its args JSON.
    assert args.pathCheckpoint[-3:] == ".pt"
    if os.path.exists(args.pathCheckpoint[:-3] + "_args.json"):
        pathConfig = args.pathCheckpoint[:-3] + "_args.json"
    elif os.path.exists(os.path.join(os.path.dirname(args.pathCheckpoint), "checkpoint_args.json")):
        pathConfig = os.path.join(os.path.dirname(args.pathCheckpoint), "checkpoint_args.json")
    else:
        assert False, \
            f"Args file not found in the directory {os.path.dirname(args.pathCheckpoint)}"
    clustering_args = readArgs(pathConfig)
    print("")
    print(f"Clutering args:\n{json.dumps(vars(clustering_args), indent=4, sort_keys=True)}")
    print('-' * 50)

    # Load CluterModule
    clusterModule = loadClusterModule(args.pathCheckpoint, norm_vec_len=args.norm_vec_len)
    clusterModule.cuda()

    # Load FeatureMaker
    print("")
    print("Loading CPC FeatureMaker")
    if 'level_gru' in vars(clustering_args) and clustering_args.level_gru is not None:
        updateConfig = argparse.Namespace(nLevelsGRU=clustering_args.level_gru)
    else:
        updateConfig = None
    model = loadModel([clustering_args.pathCheckpoint], updateConfig=updateConfig)[0]
    ## If we don't apply batch implementation, we can set LSTM model to keep hidden units
    ## making the quality of the quantized units better
    if args.nobatch:
        model.gAR.keepHidden = True
    featureMaker = FeatureModule(model, clustering_args.encoder_layer)
    # NOTE(review): loadDimReduction is not defined or imported in this file;
    # this branch would raise NameError if dimReduction is ever set — confirm.
    if clustering_args.dimReduction is not None:
        dimRed = loadDimReduction(clustering_args.dimReduction, clustering_args.centroidLimits)
        featureMaker = torch.nn.Sequential(featureMaker, dimRed)
    if not clustering_args.train_mode:
        featureMaker.eval()
    featureMaker.cuda()

    def feature_function(x):
        # Build CPC features for one audio file, optionally L2-normalizing
        # each frame vector when --norm_vec_len is set.
        if args.nobatch is False:
            res0 = buildFeature_batch(featureMaker, x,
                                      seqNorm=False,
                                      strict=args.strict,
                                      maxSizeSeq=args.max_size_seq,
                                      batch_size=args.batch_size)
            if args.norm_vec_len:
                # [!] we actually used CPC_audio/scripts/quantize_audio.py for that in the end
                res0Lengths = torch.sqrt((res0*res0).sum(2))
                res0 = res0 / res0Lengths.view(*(res0Lengths.shape), 1)
            return res0
        else:
            res0 = buildFeature(featureMaker, x,
                                seqNorm=False,
                                strict=args.strict)
            if args.norm_vec_len:
                # [!] we actually used CPC_audio/scripts/quantize_audio.py for that in the end
                res0Lengths = torch.sqrt((res0*res0).sum(2))
                res0 = res0 / res0Lengths.view(*(res0Lengths.shape), 1)
            return res0

    print("CPC FeatureMaker loaded!")

    # Quantization of files
    print("")
    print(f"Quantizing audio files...")
    seqQuantLines = []
    bar = progressbar.ProgressBar(maxval=len(seqNames))
    bar.start()
    start_time = time()
    for index, vals in enumerate(seqNames):
        bar.update(index)

        file_path = vals[1]
        file_path = os.path.join(args.pathDB, file_path)

        # Get features & quantizing
        cFeatures = feature_function(file_path).cuda()

        nGroups = cFeatures.size(-1)//clusterModule.Ck.size(-1)
        cFeatures = cFeatures.view(1, -1, clusterModule.Ck.size(-1))

        # Very long files are quantized on CPU to avoid GPU OOM.
        if len(vals) > 2 and int(vals[-1]) > 9400000:  # Librilight, to avoid OOM
            clusterModule = clusterModule.cpu()
            cFeatures = cFeatures.cpu()
            qFeatures = torch.argmin(clusterModule(cFeatures), dim=-1)
            clusterModule = clusterModule.cuda()
        else:
            qFeatures = torch.argmin(clusterModule(cFeatures), dim=-1)
        qFeatures = qFeatures[0].detach().cpu().numpy()

        # Transform to quantized line
        quantLine = ",".join(["-".join([str(i) for i in item]) for item in qFeatures.reshape(-1, nGroups)])
        seqQuantLines.append(quantLine)

    bar.finish()
    print(f"...done {len(seqQuantLines)} files in {time()-start_time} seconds.")

    # Saving outputs: one "<file_name>\t<quantized ids>" line per input file.
    print("")
    print(f"Saving outputs to {outputFile}")
    outLines = []
    for vals, quantln in zip(seqNames, seqQuantLines):
        file_path = vals[1]
        file_name = os.path.splitext(os.path.basename(file_path))[0]
        outLines.append("\t".join([file_name, quantln]))
    with open(outputFile, "w") as f:
        f.write("\n".join(outLines))


if __name__ == "__main__":
    args = sys.argv[1:]
    main(args)
11,578
3,495
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. LIMIT_EXCEEDED_ERROR_MASSAGE = 'Instance limit exceeded. A new one will be launched as soon as free space will be available.' LIMIT_EXCEEDED_EXIT_CODE = 6 class AbstractInstanceProvider(object): def run_instance(self, is_spot, bid_price, ins_type, ins_hdd, ins_img, ins_key, run_id, kms_encyr_key_id, num_rep, time_rep, kube_ip, kubeadm_token): pass def find_and_tag_instance(self, old_id, new_id): pass def verify_run_id(self, run_id): pass def check_instance(self, ins_id, run_id, num_rep, time_rep): pass def get_instance_names(self, ins_id): pass def find_instance(self, run_id): pass def terminate_instance(self, ins_id): pass def terminate_instance_by_ip(self, node_internal_ip, node_name): pass def find_nodes_with_run_id(self, run_id): instance = self.find_instance(run_id) return [instance] if instance is not None else []
1,591
526
# ProDy build/installation script (Python 2 only: uses cPickle and types.StringType).
import glob
import os
import os.path
import sys
import shutil
import cPickle
from types import StringType, UnicodeType

from distutils.core import setup
from distutils.extension import Extension
from distutils.command.install import install

PY3K = sys.version_info[0] > 2

with open('README.rst') as inp:
    long_description = inp.read()

# Extract __version__ from the package without importing it (the package may
# not be importable before its C extensions are built).
__version__ = ''
inp = open('prody/__init__.py')
for line in inp:
    if (line.startswith('__version__')):
        exec(line.strip())
        break
inp.close()


def isInstalled(module_name):
    """Check if a required package is installed, by trying to import it.

    Returns the imported module (truthy) or False.  The ``else`` branch is
    unreachable because the ``try`` body always returns, but it is kept as-is.
    """
    try:
        return __import__(module_name)
    except ImportError:
        return False
    else:
        return True


if not isInstalled('numpy'):
    print("""NumPy is not installed. This package is required for main ProDy
features and needs to be installed before you can use ProDy.
You can find NumPy at: http://numpy.scipy.org""")

PACKAGES = ['prody', 'prody.atomic', 'prody.dynamics', 'prody.ensemble',
            'prody.measure', 'prody.proteins', 'prody.trajectory',
            'prody.routines', 'prody.utilities']
PACKAGE_DATA = {}
# Test packages require Python >= 2.7.
if sys.version_info[:2] > (2,6):
    PACKAGES.extend(['prody.tests', 'prody.tests.test_kdtree',
                     'prody.tests.test_measure'])
    PACKAGE_DATA['prody.tests'] = ['data/pdb*.pdb', 'data/*.dat', 'data/*.coo',
                                   'data/dcd*.dcd']

EXTENSIONS = []

# C extensions are only built for CPython 2 (not Jython, not Python 3).
if os.name != 'java' and sys.version_info[0] == 2:
    pairwise2 = ['cpairwise2.c', 'pairwise2.py']
    if all([os.path.isfile(os.path.join('prody', 'proteins', fn))
            for fn in pairwise2]):
        EXTENSIONS.append(
            Extension('prody.proteins.cpairwise2',
                      ['prody/proteins/cpairwise2.c'],
                      include_dirs=["prody"]
                      ))
    else:
        raise Exception('one or more pairwise2 module files are missing')
    if isInstalled('numpy'):
        import numpy
        kdtree_files = ['__init__.py', 'KDTree.c', 'KDTree.h',
                        'KDTreemodule.c', 'Neighbor.h', 'kdtree.py']
        if all([os.path.isfile(os.path.join('prody', 'kdtree', fn))
                for fn in kdtree_files]):
            EXTENSIONS.append(
                Extension('prody.kdtree._CKDTree',
                          ['prody/kdtree/KDTree.c',
                           'prody/kdtree/KDTreemodule.c'],
                          include_dirs=[numpy.get_include()],
                          ))
        else:
            raise Exception('one or more kdtree module files are missing')
        PACKAGES.append('prody.kdtree')
    elif isInstalled('numpy'):
        raise ImportError('numpy is not installed')

SCRIPTS = ['scripts/prody']

setup(
    name='ProDy',
    version=__version__,
    author='Ahmet Bakan',
    author_email='ahb12 at pitt dot edu',
    description='A Python Package for Protein Dynamics Analysis',
    long_description=long_description,
    url='http://www.csb.pitt.edu/ProDy',
    packages=PACKAGES,
    package_data=PACKAGE_DATA,
    ext_modules=EXTENSIONS,
    license='GPLv3',
    keywords=('protein, dynamics, elastic network model, '
              'Gaussian network model, anisotropic network model, '
              'essential dynamics analysis, principal component analysis, '
              'Protein Data Bank, PDB, GNM, ANM, PCA'),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Chemistry',
    ],
    scripts=SCRIPTS,
    requires=['NumPy', ],
    provides=['ProDy({0:s})'.format(__version__)]
)
4,140
1,285
"""Remove limit used from query model Revision ID: d7c1a0d6f2da Revises: afc69274c25a Create Date: 2019-06-04 10:12:36.675369 """ # revision identifiers, used by Alembic. revision = "d7c1a0d6f2da" down_revision = "afc69274c25a" import sqlalchemy as sa from alembic import op def upgrade(): with op.batch_alter_table("query") as batch_op: batch_op.drop_column("limit_used") def downgrade(): op.add_column("query", sa.Column("limit_used", sa.BOOLEAN(), nullable=True))
491
218
EPS = 1e-5

import threading
import torch
import torch.nn as nn
import torch.nn.functional as F

from .aggregators import AGGREGATORS
from .layers import MLP, FCLayer
from .scalers import SCALERS


class EIGLayerComplex(nn.Module):
    """EIG message-passing layer with a learned edge pre-transformation.

    Edge messages are an MLP of [src_h, dst_h (, edge feats)], aggregated per
    node by the configured aggregators/scalers, concatenated with the input
    features and post-transformed by an MLP.
    """

    def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, residual,
                 edge_features, edge_dim, pretrans_layers=1, posttrans_layers=1):
        super().__init__()

        # retrieve the aggregators and scalers functions
        # (both arguments arrive as whitespace-separated name strings)
        aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()]
        scalers = [SCALERS[scale] for scale in scalers.split()]

        self.dropout = dropout
        self.graph_norm = graph_norm
        self.batch_norm = batch_norm
        self.edge_features = edge_features
        self.residual = residual
        self.aggregators = aggregators
        self.scalers = scalers
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        # pretrans consumes [src_h, dst_h] plus edge features when enabled
        self.pretrans = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim,
                            out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none')
        # posttrans consumes every aggregator x scaler output plus the input h
        self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim, hidden_size=out_dim,
                            out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none')
        self.avg_d = avg_d
        # residual connections need matching dimensions
        if in_dim != out_dim:
            self.residual = False

    def pretrans_edges(self, edges):
        # Build the raw edge message and carry source/dest eigenfeatures along.
        if self.edge_features:
            z2 = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1)
        else:
            z2 = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
        return {'e': self.pretrans(z2), 'eig_s': edges.src['eig'], 'eig_d': edges.dst['eig']}

    def message_func(self, edges):
        return {'e': edges.data['e'], 'eig_s': edges.data['eig_s'].to('cuda' if torch.cuda.is_available() else 'cpu'),
                'eig_d': edges.data['eig_d'].to('cuda' if torch.cuda.is_available() else 'cpu')}

    def reduce_func(self, nodes):
        h_in = nodes.data['h']
        h = nodes.mailbox['e']
        eig_s = nodes.mailbox['eig_s']
        eig_d = nodes.mailbox['eig_d']
        D = h.shape[-2]  # in-degree of the nodes in this bucket
        to_cat = []
        # NOTE(review): the bare except falls back to the aggregator variant
        # that also takes the node's own features (h_in); it will also mask
        # genuine aggregator errors — confirm intended.
        for aggregate in self.aggregators:
            try:
                to_cat.append(aggregate(self, h, eig_s, eig_d))
            except:
                to_cat.append(aggregate(self, h, eig_s, eig_d, h_in))
        h = torch.cat(to_cat, dim=1)

        if len(self.scalers) > 1:
            h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1)

        return {'h': h}

    def posttrans_nodes(self, nodes):
        return self.posttrans(nodes.data['h'])

    def forward(self, g, h, e, snorm_n):
        h_in = h
        g.ndata['h'] = h

        if self.edge_features:  # add the edges information only if edge_features = True
            g.edata['ef'] = e

        # pretransformation
        g.apply_edges(self.pretrans_edges)

        # aggregation
        g.update_all(self.message_func, self.reduce_func)
        h = torch.cat([h, g.ndata['h']], dim=1)

        # posttransformation
        h = self.posttrans(h)

        # graph and batch normalization and residual
        if self.graph_norm:
            h = h * snorm_n
        if self.batch_norm:
            h = self.batchnorm_h(h)
        h = F.relu(h)
        if self.residual:
            h = h_in + h

        h = F.dropout(h, self.dropout, training=self.training)
        return h


class EIGLayerSimple(nn.Module):
    """EIG layer without the edge pre-transformation: messages are the raw
    source-node features, aggregated and post-transformed."""

    def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, residual,
                 avg_d, posttrans_layers=1):
        super().__init__()

        # retrieve the aggregators and scalers functions
        aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()]
        scalers = [SCALERS[scale] for scale in scalers.split()]

        self.dropout = dropout
        self.graph_norm = graph_norm
        self.batch_norm = batch_norm
        self.residual = residual
        self.aggregators = aggregators
        self.scalers = scalers
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        # no "+ 1": the input features are not concatenated back in here
        self.posttrans = MLP(in_size=(len(aggregators) * len(scalers)) * in_dim, hidden_size=out_dim,
                             out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none')
        self.avg_d = avg_d
        # residual connections need matching dimensions
        if in_dim != out_dim:
            self.residual = False

    def pretrans_edges(self, edges):
        # Messages are the untouched source features plus eigenfeatures.
        return {'e': edges.src['h'], 'eig_s': edges.src['eig'], 'eig_d': edges.dst['eig']}

    def message_func(self, edges):
        return {'e': edges.data['e'], 'eig_s': edges.data['eig_s'].to('cuda' if torch.cuda.is_available() else 'cpu'),
                'eig_d': edges.data['eig_d'].to('cuda' if torch.cuda.is_available() else 'cpu')}

    def reduce_func(self, nodes):
        h_in = nodes.data['h']
        h = nodes.mailbox['e']
        eig_s = nodes.mailbox['eig_s']
        eig_d = nodes.mailbox['eig_d']
        D = h.shape[-2]  # in-degree of the nodes in this bucket
        to_cat = []
        # Same aggregator-signature fallback as EIGLayerComplex.
        for aggregate in self.aggregators:
            try:
                to_cat.append(aggregate(self, h, eig_s, eig_d))
            except:
                to_cat.append(aggregate(self, h, eig_s, eig_d, h_in))
        h = torch.cat(to_cat, dim=1)

        if len(self.scalers) > 1:
            h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1)

        return {'h': h}

    def posttrans_nodes(self, nodes):
        return self.posttrans(nodes.data['h'])

    def forward(self, g, h, e, snorm_n):
        h_in = h
        g.ndata['h'] = h

        g.apply_edges(self.pretrans_edges)

        # aggregation
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']

        # posttransformation
        h = self.posttrans(h)

        # graph and batch normalization and residual
        if self.graph_norm:
            h = h * snorm_n
        if self.batch_norm:
            h = self.batchnorm_h(h)
        h = F.relu(h)
        if self.residual:
            h = h_in + h

        h = F.dropout(h, self.dropout, training=self.training)
        return h


class EIGTower(nn.Module):
    """Single tower used by EIGLayerTower: like EIGLayerComplex but without
    ReLU/residual at the end (those are applied by the enclosing layer).
    Aggregators/scalers are expected to arrive already resolved to callables.
    """

    def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d,
                 pretrans_layers, posttrans_layers, edge_features, edge_dim):
        super().__init__()
        self.dropout = dropout
        self.graph_norm = graph_norm
        self.batch_norm = batch_norm
        self.edge_features = edge_features
        self.aggregators = aggregators
        self.scalers = scalers
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.pretrans = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim,
                            out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none')
        self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim, hidden_size=out_dim,
                             out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none')
        self.avg_d = avg_d

    def pretrans_edges(self, edges):
        # Build the raw edge message and carry source/dest eigenfeatures along.
        if self.edge_features:
            z2 = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1)
        else:
            z2 = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
        return {'e': self.pretrans(z2), 'eig_s': edges.src['eig'], 'eig_d': edges.dst['eig']}

    def message_func(self, edges):
        return {'e': edges.data['e'], 'eig_s': edges.data['eig_s'].to('cuda' if torch.cuda.is_available() else 'cpu'),
                'eig_d': edges.data['eig_d'].to('cuda' if torch.cuda.is_available() else 'cpu')}

    def reduce_func(self, nodes):
        h_in = nodes.data['h']
        h = nodes.mailbox['e']
        eig_s = nodes.mailbox['eig_s']
        eig_d = nodes.mailbox['eig_d']
        D = h.shape[-2]  # in-degree of the nodes in this bucket
        to_cat = []
        # Same aggregator-signature fallback as EIGLayerComplex.
        for aggregate in self.aggregators:
            try:
                to_cat.append(aggregate(self, h, eig_s, eig_d))
            except:
                to_cat.append(aggregate(self, h, eig_s, eig_d, h_in))
        h = torch.cat(to_cat, dim=1)

        if len(self.scalers) > 1:
            h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1)

        return {'h': h}

    def posttrans_nodes(self, nodes):
        return self.posttrans(nodes.data['h'])

    def forward(self, g, h, e, snorm_n):
        g.ndata['h'] = h

        if self.edge_features:  # add the edges information only if edge_features = True
            g.edata['ef'] = e

        # pretransformation
        g.apply_edges(self.pretrans_edges)

        # aggregation
        g.update_all(self.message_func, self.reduce_func)
        h = torch.cat([h, g.ndata['h']], dim=1)

        # posttransformation
        h = self.posttrans(h)

        # graph and batch normalization
        if self.graph_norm:
            h = h * snorm_n
        if self.batch_norm:
            h = self.batchnorm_h(h)
        h = F.dropout(h, self.dropout, training=self.training)
        return h


class EIGLayerTower(nn.Module):
    """
    Param: [in_dim, out_dim, n_heads]
    """

    def __init__(self, in_dim, out_dim, aggregators, scalers, avg_d, dropout, graph_norm, batch_norm, towers=5,
                 pretrans_layers=1, posttrans_layers=1, divide_input=True, residual=False, edge_features=False,
                 edge_dim=0):
        super().__init__()
        assert ((not divide_input) or in_dim % towers == 0), "if divide_input is set the number of towers has to divide in_dim"
        assert (out_dim % towers == 0), "the number of towers has to divide the out_dim"
        assert avg_d is not
None # retrieve the aggregators and scalers functions aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()] scalers = [SCALERS[scale] for scale in scalers.split()] self.divide_input = divide_input self.input_tower = in_dim // towers if divide_input else in_dim self.output_tower = out_dim // towers self.in_dim = in_dim self.out_dim = out_dim self.edge_features = edge_features self.residual = residual if in_dim != out_dim: self.residual = False # convolution self.towers = nn.ModuleList() for _ in range(towers): self.towers.append(EIGTower(in_dim=self.input_tower, out_dim=self.output_tower, aggregators=aggregators, scalers=scalers, avg_d=avg_d, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers, batch_norm=batch_norm, dropout=dropout, graph_norm=graph_norm, edge_features=edge_features, edge_dim=edge_dim)) # mixing network self.mixing_network = FCLayer(out_dim, out_dim, activation='LeakyReLU') def forward(self, g, h, e, snorm_n): h_in = h # for residual connection if self.divide_input: h_cat = torch.cat( [tower(g, h[:, n_tower * self.input_tower: (n_tower + 1) * self.input_tower], e, snorm_n) for n_tower, tower in enumerate(self.towers)], dim=1) else: h_cat = torch.cat([tower(g, h, e, snorm_n) for tower in self.towers], dim=1) if len(self.towers) > 1: h_out = self.mixing_network(h_cat) else: h_out = h_cat if self.residual: h_out = h_in + h_out # residual connection return h_out class EIGLayer(nn.Module): def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, type_net, residual, towers=5, divide_input=True, edge_features=None, edge_dim=None, pretrans_layers=1, posttrans_layers=1,): super().__init__() self.type_net = type_net if type_net == 'simple': self.model = EIGLayerSimple(in_dim=in_dim, out_dim=out_dim, dropout=dropout, graph_norm=graph_norm, batch_norm=batch_norm, residual=residual, aggregators=aggregators, scalers=scalers, avg_d=avg_d, posttrans_layers=posttrans_layers) elif type_net == 
'complex': self.model = EIGLayerComplex(in_dim=in_dim, out_dim=out_dim, dropout=dropout, graph_norm=graph_norm, batch_norm=batch_norm, aggregators=aggregators, residual=residual, scalers=scalers, avg_d=avg_d, edge_features=edge_features, edge_dim=edge_dim, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers) elif type_net == 'towers': self.model = EIGLayerTower(in_dim=in_dim, out_dim=out_dim, aggregators=aggregators, scalers=scalers, avg_d=avg_d, dropout=dropout, graph_norm=graph_norm, batch_norm=batch_norm, towers=towers, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers, divide_input=divide_input, residual=residual, edge_features=edge_features, edge_dim=edge_dim) def __repr__(self): return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__, self.in_dim, self.out_dim)
13,393
4,636
# -*- coding: utf-8 -*-
"""Configuration helpers for the NLP pipeline.

Loads ``key=value`` properties from ``config/nlp.properties``, detects
whether the training data / properties / rasa config changed since the
last run (via an MD5 hash dump), and post-processes classifier responses.
"""
import os
import json
import hashlib
from warnings import simplefilter

from utils import log_util

# ignore all warnings
simplefilter(action='ignore')

##Global parameters
scriptDir = os.path.dirname(__file__)
dataPath = os.path.join(scriptDir, '..', 'training_data', 'intents')
propertyFile = os.path.join(scriptDir, '..', 'config', 'nlp.properties')
separator = "="
properties = {}


def load_parameters() -> None:
    """Parse ``propertyFile`` into the module-level ``properties`` dict."""
    global properties
    with open(propertyFile) as f:
        for line in f:
            if separator in line:
                # split only on the first '=' so values may themselves contain '='
                name, value = line.split(separator, 1)
                properties[name.strip()] = value.strip()


def getProperties():
    """Return the currently loaded properties mapping."""
    return properties


def get_parameter(param):
    """Return the value of *param*, or "" (after logging) when missing."""
    if param in properties:
        return properties[param]
    # Include the parameter name in the message: the original format
    # string had no '{}' placeholder, so .format(param) was a no-op.
    log_util.log_infomsg(
        '[NLP_CONFIG] the required parameter {} could not be located'.format(param))
    return ""


def check_data_available(self) -> bool:
    """Return True when a training-data file for ``self.domain`` with
    extension ``self.format`` exists in ``dataPath``.

    NOTE(review): *self* is expected to carry ``domain`` and ``format``
    attributes; this module-level helper is used like a method -- confirm
    against callers.
    """
    for file in os.listdir(dataPath):
        if file.startswith(self.domain) and file.endswith(self.format):
            return True
    return False


def is_config_stale(domain, locale):
    """Compare an MD5 over (training data + properties + rasa config)
    against the previously dumped hash for *domain*.

    Returns True when nothing changed; otherwise rewrites the hash dump
    and returns False.  NOTE(review): the name suggests the opposite
    polarity -- verify against callers before changing it.
    """
    tmpFile = os.path.join(scriptDir, '..', 'training_data', 'tmp',
                           domain + '_hashdump')
    try:
        tmp = open(tmpFile, 'r')
    except IOError:
        # first run: create the dump file so subsequent reads succeed
        tmp = open(tmpFile, 'a+')
    with tmp:
        hash_original = tmp.read()

    # need to check if any changes to data, property file or rasa config file
    dataFile = os.path.join(
        dataPath, domain + '_' + locale + '.' + get_parameter('FORMAT'))
    with open(dataFile, 'rb') as f:
        data_1 = f.read()

    # check if any changes in properties
    load_parameters()
    data_2 = json.dumps(getProperties())

    if get_parameter('ALGORITHM') == 'NLU':
        rasaConfigFile = os.path.join(scriptDir, '..', 'core', 'config',
                                      get_parameter('CONFIG_FILE'))
        with open(rasaConfigFile, 'rb') as f:
            data_3 = f.read()
    else:
        data_3 = None

    totalData = str(data_1) + str(data_2) + str(data_3)
    hash_current = hashlib.md5(totalData.encode('utf-8')).hexdigest()

    if hash_original == hash_current:
        return True
    # persist the new hash so the next call sees the updated state
    with open(tmpFile, 'w') as out:
        out.write(hash_current)
    return False


def ensemble_confidence_score(response_1, response_2):
    """Merge two intent rankings into *response_1* (mutated and returned).

    Each intent's confidence becomes the average of its scores in both
    responses (intents absent from *response_2* count as 0), formatted
    as a two-decimal string; the top intent's confidence is updated too.
    """
    scores_1 = get_scores(response_1)
    scores_2 = get_scores(response_2)
    # replace scores_1 with weighted average
    for item in scores_1:
        if item in scores_2:
            scores_1[item] = "{:.2f}".format(
                (float(scores_1[item]) + float(scores_2[item])) / 2)
        else:
            scores_1[item] = "{:.2f}".format(float(scores_1[item]) / 2)
    # update the confidence score with new one
    for items in response_1["intent_ranking"]:
        if items["name"] in scores_1:
            items['confidence'] = scores_1[items["name"]]
    # update the intent JSONObject
    response_1["intent"]["confidence"] = scores_1[response_1["intent"]["name"]]
    return response_1


def get_scores(response):
    """Map intent name -> confidence from a response's intent ranking."""
    return {items["name"]: items["confidence"]
            for items in response["intent_ranking"]}


def normalise_entity_score(response):
    """Format every entity confidence to a three-decimal string."""
    for items in response["entities"]:
        items["confidence_entity"] = "{:.3f}".format(items["confidence_entity"])
    return response
3,553
1,120
import io import json from google.auth import compute_engine from google.oauth2 import service_account def gcp_credentials(service_account_file): if service_account_file: with io.open(service_account_file, 'r', encoding='utf-8') as json_fi: credentials_info = json.load(json_fi) credentials = service_account.Credentials.from_service_account_info(credentials_info) else: # Explicitly use Compute Engine credentials. These credentials are # available on Compute Engine, App Engine Flexible, and Container Engine. credentials = compute_engine.Credentials() return credentials
641
168
"""Twitter bot: pick a random Sakugabooru post, download its .mp4 and tweet it."""
import requests
import tweepy
import random
import time
import os
import bs4
from bs4 import BeautifulSoup
from pybooru import Moebooru

# Base URL of an individual post page on Sakugabooru.
siteurl = 'https://www.sakugabooru.com/post/show/'
# Desktop browser user-agent so the site serves normal pages.
header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}

client = Moebooru(site_url='https://www.sakugabooru.com')
# NOTE(review): this warm-up query is never used (main() fetches its own
# list); kept for compatibility with anything importing `files`.
files = client.post_list(tags="order:random")

# Create your own token.txt file with your API Keys from Twitter.
# The keys sit on fixed line numbers (1, 4, 7 and 10) of that file.
with open("token.txt") as api_keys:
    lines = api_keys.readlines()
consumer_key = lines[1].rstrip()
consumer_secret = lines[4].rstrip()
access_token = lines[7].rstrip()
access_token_secret = lines[10].rstrip()

# Local folder the videos are downloaded to before being tweeted.
MEDIA_DIR = 'C:/Users/Admin/Documents/PersonalFiles/Repositories/sakugabooru-video-files'  # Customize Directory


def main():
    """Fetch one random post; when it is an .mp4, download and tweet it."""
    try:
        files = client.post_list(tags="order:random")  # Random Post
        choice = random.choice(files)  # Select 1 Random Post from Query
        boorurl = choice['file_url']  # File URL
        tags = choice['tags']  # Post Tags
        verdict = filetypechecker(boorurl)  # Checker if .mp4 file or not
        if verdict:
            posturl = siteurl + "{0}".format(choice['id'])  # POST URL from SakugaBooru
            animatorname = artistgrabber(posturl)
            animename = animegrabber(posturl)
            time.sleep(5)  # be polite to the server between requests
            data = requests.get(boorurl, headers=header)
            print("data:", data.status_code)
            with open(MEDIA_DIR + "/{}".format(choice['id']) + ".mp4", 'wb') as file:
                file.write(data.content)
            params = "Animator Name: {}\nListed Anime Name: {}\nTags: {}\nPost URL: {}\n".format(
                animatorname, animename, tags, posturl)
            time.sleep(5)
            mediapost(params)
    except Exception as e:
        print("Main() Error:", e)


def artistgrabber(posturl):
    """Scrape the post page for artist tags; return the last one, or None.

    Initialising to None fixes a NameError the original raised when the
    page listed no artist tag at all.
    """
    r = requests.get(posturl, headers=header)
    print("artistgrabber:", r.status_code)
    soup = bs4.BeautifulSoup(r.text, 'lxml')
    artiststr = None
    for div in soup.find_all(class_="tag-type-artist"):
        for artists in div.find_all('a'):
            artiststr = artists.text
            print(artiststr)
    return artiststr


def animegrabber(posturl):
    """Scrape the post page for copyright (anime) tags; return the last one, or None."""
    r = requests.get(posturl, headers=header)
    print("animegrabber:", r.status_code)
    soup = bs4.BeautifulSoup(r.text, 'lxml')
    animestr = None
    for div in soup.find_all(class_="tag-type-copyright"):
        for anime in div.find_all('a'):
            animestr = anime.text
            print(animestr)
    return animestr


def filetypechecker(boorurl):
    """Return True when the URL's final path segment names an .mp4 file.

    The original returned an implicit None (instead of False) whenever
    '/' occurred at index 0; rsplit with [-1] handles every case and
    always yields a proper bool.
    """
    return ".mp4" in boorurl.rsplit('/', 1)[-1]


def mediapost(params):
    """Upload the most recently downloaded video and tweet it with *params* as the status text."""
    try:
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth)
    except Exception as e:
        print(e)
    try:
        directory_name = MEDIA_DIR  # Customize Directory
        media_list = filter(
            lambda x: os.path.isfile(os.path.join(directory_name, x)),
            os.listdir(directory_name))
        # newest file first, so index 0 is the video main() just saved
        media_list = sorted(
            media_list,
            key=lambda x: os.path.getmtime(os.path.join(directory_name, x)),
            reverse=True)
        file_path = [os.path.join(directory_name, media) for media in media_list]
        media = file_path[0]
        print(media)
        upload_media = api.media_upload(media, media_category='tweet_video')
        api.update_status(status=params, media_ids=[upload_media.media_id_string])
    except Exception as e:
        print("Mediapost() Error:", e)


if __name__ == '__main__':
    main()
4,218
1,388
""" """ from membership.web.urls import membership_urls from public.web.urls import error_urls, public_urls, static_urls from public.web.views import home from wheezy.routing import url locale_pattern = "{locale:(en|ru)}/" locale_defaults = {"locale": "en"} locale_urls = public_urls + membership_urls locale_urls.append(("error/", error_urls)) all_urls = [ url("", home, locale_defaults, name="default"), (locale_pattern, locale_urls, locale_defaults), ] all_urls += static_urls
491
158
from stheno import ( B, # Linear algebra backend Graph, # Graph that keep track of the graphical model GP, # Gaussian process EQ, # Squared-exponential kernel Matern12, # Matern-1/2 kernel Matern52, # Matern-5/2 kernel Delta, # Noise kernel Normal, # Gaussian distribution Diagonal, # Diagonal matrix dense, # Convert matrix objects to regular matrices ) __all__ = ['model', 'project', 'objective', 'predict'] def model(vs, m): """Construct model. Args: vs (:class:`varz.Vars`): Variable container. m (int): Number of latent processes. Returns: tuple: Tuple containing a list of the latent processes, the observation noise, and the noises on the latent processes. """ g = Graph() # Observation noise: noise_obs = vs.bnd(0.1, name='noise_obs') def make_latent_process(i): # Long-term trend: variance = vs.bnd(0.9, name=f'{i}/long_term/var') scale = vs.bnd(2 * 30, name=f'{i}/long_term/scale') kernel = variance * EQ().stretch(scale) # Short-term trend: variance = vs.bnd(0.1, name=f'{i}/short_term/var') scale = vs.bnd(20, name=f'{i}/short_term/scale') kernel += variance * Matern12().stretch(scale) return GP(kernel, graph=g) # Latent processes: xs = [make_latent_process(i) for i in range(m)] # Latent noises: noises_latent = vs.bnd(0.1 * B.ones(m), name='noises_latent') return xs, noise_obs, noises_latent def project(vs, m, y_data, locs): """Project the data. Args: vs (:class:`varz.Vars`): Variable container. m (int): Number of latent processes. y_data (tensor): Observations. locs (tensor): Spatial locations of observations. Returns: tuple: Tuple containing the projected outputs, the mixing matrix, S from the mixing matrix, and the observation noises. """ _, noise_obs, noises_latent = model(vs, m) # Construct mixing matrix and projection. scales = vs.bnd(B.ones(2), name='scales') K = dense(Matern52().stretch(scales)(locs)) U, S, _ = B.svd(K) S = S[:m] H = U[:, :m] * S[None, :] ** .5 T = B.transpose(U[:, :m]) / S[:, None] ** .5 # Project data and unstack over latent processes. 
y_proj = B.unstack(B.matmul(T, y_data, tr_b=True)) # Observation noises: noises_obs = noise_obs * B.ones(B.dtype(noise_obs), B.shape(y_data)[1]) return y_proj, H, S, noises_obs def objective(vs, m, x_data, y_data, locs): """NLML objective. Args: vs (:class:`varz.Vars`): Variable container. m (int): Number of latent processes. x_data (tensor): Time stamps of the observations. y_data (tensor): Observations. locs (tensor): Spatial locations of observations. Returns: scalar: Negative log-marginal likelihood. """ y_proj, _, S, noises_obs = project(vs, m, y_data, locs) xs, noise_obs, noises_latent = model(vs, m) # Add contribution of latent processes. lml = 0 for i, (x, y) in enumerate(zip(xs, y_proj)): e_signal = GP((noise_obs / S[i] + noises_latent[i]) * Delta(), graph=x.graph) lml += (x + e_signal)(x_data).logpdf(y) e_noise = GP(noise_obs / S[i] * Delta(), graph=x.graph) lml -= e_noise(x_data).logpdf(y) # Add regularisation contribution. lml += B.sum(Normal(Diagonal(noises_obs)).logpdf(B.transpose(y_data))) # Return negative the evidence, normalised by the number of data points. n, p = B.shape(y_data) return -lml / (n * p) def predict(vs, m, x_data, y_data, locs, x_pred): """Make predictions. Args: vs (:class:`varz.Vars`): Variable container. m (int): Number of latent processes. x_data (tensor): Time stamps of the observations. y_data (tensor): Observations. locs (tensor): Spatial locations of observations. x_pred (tensor): Time stamps to predict at. Returns: tuple: Tuple containing the predictions for the latent processes and predictions for the observations. """ # Construct model and project data for prediction. xs, noise_obs, noises_latent = model(vs, m) y_proj, H, S, noises_obs = project(vs, m, y_data, locs) L = noise_obs / S + noises_latent # Condition latent processes. 
xs_posterior = [] for x, noise, y in zip(xs, L, y_proj): e = GP(noise * Delta(), graph=x.graph) xs_posterior.append(x | ((x + e)(x_data), y)) xs = xs_posterior # Extract posterior means and variances of the latent processes. x_means, x_vars = zip(*[(x.mean(x_pred)[:, 0], x.kernel.elwise(x_pred)[:, 0]) for x in xs]) # Construct predictions for latent processes. lat_preds = [B.to_numpy(mean, mean - 2 * (var + L[i]) ** .5, mean + 2 * (var + L[i]) ** .5) for i, (mean, var) in enumerate(zip(x_means, x_vars))] # Pull means through mixing matrix. x_means = B.stack(*x_means, axis=0) y_means = B.matmul(H, x_means) # Pull variances through mixing matrix and add noise. x_vars = B.stack(*x_vars, axis=0) y_vars = B.matmul(H ** 2, x_vars + noises_latent[:, None]) + noise_obs # Construct predictions for observations. obs_preds = [(mean, mean - 2 * var ** .5, mean + 2 * var ** .5) for mean, var in zip(y_means, y_vars)] return lat_preds, obs_preds
5,571
1,959
import itertools as it from tests.utilities import ( is_equivalent_atom, is_equivalent_molecule, ) def are_equivalent_functional_groups( functional_groups1, functional_groups2, ): functional_groups = it.zip_longest( functional_groups1, functional_groups2, ) for fg1, fg2 in functional_groups: is_equivalent_functional_group(fg1, fg2) def is_equivalent_functional_group( functional_group1, functional_group2, ): assert functional_group1.__class__ is functional_group2.__class__ atoms = it.zip_longest( functional_group1.get_atoms(), functional_group2.get_atoms(), ) for atom1, atom2 in atoms: is_equivalent_atom(atom1, atom2) for placer_id1, placer_id2 in it.zip_longest( functional_group1.get_placer_ids(), functional_group2.get_placer_ids(), ): assert placer_id1 == placer_id2 for core_atom_id1, core_atom_id2 in it.zip_longest( functional_group1.get_core_atom_ids(), functional_group2.get_core_atom_ids(), ): assert core_atom_id1 == core_atom_id2 def is_equivalent_building_block(building_block1, building_block2): is_equivalent_molecule(building_block1, building_block2) are_equivalent_functional_groups( functional_groups1=building_block1.get_functional_groups(), functional_groups2=building_block2.get_functional_groups(), ) for placer_id1, placer_id2 in it.zip_longest( building_block1.get_placer_ids(), building_block2.get_placer_ids(), ): assert placer_id1 == placer_id2 for core_atom_id1, core_atom_id2 in it.zip_longest( building_block1.get_core_atom_ids(), building_block2.get_core_atom_ids(), ): assert core_atom_id1 == core_atom_id2 def is_clone_building_block(building_block1, building_block2): assert building_block1 is not building_block2 is_equivalent_building_block(building_block1, building_block2)
1,995
683
from .pydeps import pydeps pydeps()
36
16
from os import listdir from os.path import join import os, errno def getImageNum(rootDir): return len(listdir(join(rootDir))) def safeMkdir(path:str): try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise
271
95
#coding:utf-8 import re import math from docclass import Classifier def test_infc_func(): c = Classifier(getfeatures=None) c.infc("python", "good") c.infc("python", "good") c.infc("the", "bad") c.infc("the", "good") print c.fc if __name__ == "__main__": test_infc_func()
305
124
import abc import time from dataclasses import dataclass from functools import partial from typing import TYPE_CHECKING import kubernetes from kubernetes.utils.create_from_yaml import FailToCreateError from urllib3.exceptions import MaxRetryError from hermes.cloudbreak.utils import snakeify, wait_for if TYPE_CHECKING: from hermes.cloudbreak.kubernetes import K8sApiClient @dataclass class Resource(abc.ABC): _client: "K8sApiClient" name: str namespace: str = "default" MAX_RETRY_GRACE_SECONDS = 300 STATUS_AVAILABLE_GRACE_SECONDS = 10 @classmethod def create(cls, client, config): if config["kind"] == "Deployment": cls = Deployment elif config["kind"] == "Service": cls = Service elif config["kind"] == "DaemonSet": cls = DaemonSet else: raise ValueError( "Resource kind {} not supported yet".format(config["kind"]) ) metadata = config["metadata"] obj = cls(client, metadata["name"], metadata["namespace"]) create_fn = partial( kubernetes.utils.create_from_dict, k8s_client=client._client, data=config, ) response = obj._make_a_request(create_fn) if response is None: raise MaxRetryError return obj def __post_init__(self): self._creation_time = time.time() self._unavailable = False self._unavailable_time = None @abc.abstractproperty def client(self): pass def _make_a_request(self, request_fn, do_raise=False): try: # try to make the request return request_fn() except ( kubernetes.client.exceptions.ApiException, FailToCreateError, ) as e: try: # create from yaml wraps around API exceptions, # so grab the underlying exception here first status = e.api_exceptions[0].status except AttributeError: status = e.status if status != 401: raise if not do_raise: self._client.cluster.refresh_credentials() self._client._client.configuration.api_key[ "authorization" ] = self._client.cluster.token # try the request again with do_raise set to # true to indicate that these credentials just # don't have access to this cluster return self._make_a_request(request_fn, do_raise=True) else: # if 
do_raise is set, indicate that the request # is unauthorized raise RuntimeError("Unauthorized request to cluster") except MaxRetryError: # sometimes this error can get raised if the master nodes # of the cluster are busy doing something. Return None # to indicate this is happening but give things a few # minutes to get back to normal if not self._unavailable: self._unavailable = True self._unavailable_time = time.time() elif ( time.time() - self._unavailable_time ) < self.MAX_RETRY_GRACE_SECONDS: raise RuntimeError( "Deployment {} has been unavailable for {} seconds".format( self.name, self.MAX_RETRY_GRACE_SECONDS ) ) return None except Exception as e: print(type(e), e) raise def get(self): resource_type = snakeify(self.__class__.__name__) get_fn = partial( getattr(self.client, f"read_namespaced_{resource_type}_status"), name=self.name, namespace=self.namespace, ) try: response = self._make_a_request(get_fn) self._unavailable = False return response except kubernetes.client.ApiException as e: if e.status == 404: raise RuntimeError(f"{self.message} no longer exists") raise def delete(self): resource_type = snakeify(self.__class__.__name__) delete_fn = partial( getattr(self.client, f"delete_namespaced_{resource_type}_status"), name=self.name, namespace=self.namespace, ) return self._make_a_request(delete_fn) @abc.abstractmethod def is_ready(self): pass def wait_for_ready(self): wait_for( self.is_ready, f"Waiting for {self.message} to become ready", ) def submit_delete(self): try: response = self.delete() return response is not None except kubernetes.client.ApiException as e: if e.status == 404: return True raise def is_deleted(self): try: self.get() except RuntimeError as e: if str(e).endswith("no longer exists"): return True raise else: return False def remove(self): if not self.submit_delete(): wait_for( self.submit_delete, f"Waiting for {self.message} to become available to delete", ) if not self.is_deleted(): # give us a chance to not have to display the progress 
bar wait_for(self.is_deleted, f"Waiting for {self.message} to delete") else: # TODO: logging? print(f"Deleted {self.message}") # TODO: remove this from self._client resources? @property def message(self): resource_type = snakeify(self.__class__.__name__).replace("_", " ") return " ".join([resource_type, self.name]) class Deployment(Resource): @property def client(self): return kubernetes.client.AppsV1Api(self._client._client) # TODO: custom wait that clocks that the number of available instances def is_ready(self): response = self.get() if response is None: return False conditions = response.status.conditions if conditions is None: return False statuses = {i.type: eval(i.status) for i in conditions} if len(statuses) == 0 and ( (time.time() - self._creation_time) > self.STATUS_AVAILABLE_GRACE_SECONDS ): raise RuntimeError( "Deployment {} has gone {} seconds with no " "available status information".format( self.name, self.STATUS_AVAILABLE_GRACE_SECONDS ) ) try: if statuses["Available"]: return True except KeyError: try: if not statuses["Progressing"]: raise RuntimeError(f"{self.message} stopped progressing") except KeyError: return False def scale(self, replicas: int): response = self.get() if response is None: return False response.spec.replicas = replicas scale_fn = partial( self.client.patch_namespaced_deployment_scale, name=self.name, namespace=self.namespace, body=response, ) return self._make_a_request(scale_fn) @dataclass class Service(Resource): """Really represents specifically a LoadBalancer""" def __post_init__(self): self._ip = None @property def client(self): return kubernetes.client.CoreV1Api(self._client._client) @property def ip(self): if self._ip is None: response = self.get() if response is None: return None try: self._ip = response.status.load_balancer.ingress[0].ip except TypeError: return None return self._ip def is_ready(self): # server is considered ready once it has a public IP address return self.ip is not None class DaemonSet(Resource): 
@property def client(self): return kubernetes.client.AppsV1Api(self._client._client) def is_ready(self): response = self.get() if response is None: return False status = response.status return status.desired_number_scheduled == status.number_ready
8,560
2,268
c = float(input("Enter Amount Between 0-99 :")) print(c // 20, "Twenties") c = c % 20 print(c // 10, "Tens") c = c % 10 print(c // 5, "Fives") c = c % 5 print(c // 1, "Ones") c = c % 1 print(c // 0.25, "Quarters") c = c % 0.25 print(c // 0.1, "Dimes") c = c % 0.1 print(c // 0.05, "Nickles") c = c % 0.05 print(c // 0.01, "Pennies")
333
190
#!/usr/bin/env python # This work was created by participants in the DataONE project, and is # jointly copyrighted by participating institutions in DataONE. For # more information on DataONE, see our web site at http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test that the Content-Disposition holds the correct filename.""" import logging import os import re import freezegun import pytest import responses import d1_gmn.tests.gmn_mock import d1_gmn.tests.gmn_test_case import d1_test.d1_test_case import d1_test.instance_generator import d1_test.instance_generator.identifier logger = logging.getLogger(__name__) @d1_test.d1_test_case.reproducible_random_decorator("TestContentDisposition") class TestContentDisposition(d1_gmn.tests.gmn_test_case.GMNTestCase): @responses.activate def _check( self, client, did, sysmeta_filename, sysmeta_format_id, expected_base_name, expected_file_ext, ): with freezegun.freeze_time("1981-05-02"): with d1_gmn.tests.gmn_mock.disable_auth(): base_name, file_ext = self._create_obj( client, did, sysmeta_filename, sysmeta_format_id ) assert base_name == expected_base_name assert file_ext == expected_file_ext def _create_obj(self, client, did, sysmeta_filename, sysmeta_format_id): pid, sid, send_sciobj_bytes, send_sysmeta_pyxb = self.create_obj( client, pid=did, fileName=sysmeta_filename, formatId=sysmeta_format_id ) # View response response = client.get(pid) # self.sample.gui_sxs_diff(response, "", 
"response") # View SysMeta # self.sample.gui_sxs_diff(client.getSystemMetadata(pid), "", "sysmeta") return self._extract_filename(response) def _extract_filename(self, response): file_name = re.search( r'filename="(.*)"', response.headers["Content-Disposition"] ).group(1) return os.path.splitext(file_name) def test_1000(self, gmn_client_v2): """SciObj without fileName returns filename generated from PID and formatId. When formatId is unknown, returns filename with extension, ".data". """ pid = d1_test.instance_generator.identifier.generate_pid() self._check(gmn_client_v2, pid, None, "unknown_format_id", pid, ".data") @pytest.mark.parametrize( "format_id,file_ext", [ ("text/tsv", ".tsv"), ("video/x-ms-wmv", ".wmv"), ("-//ecoinformatics.org//eml-access-2.0.0beta4//EN", ".xml"), ], ) def test_1010(self, gmn_client_v2, format_id, file_ext): """SciObj without fileName returns filename generated from PID and formatId. When formatId is valid, returns filename with extension from objectFormatList. """ pid = d1_test.instance_generator.identifier.generate_pid() self._check(gmn_client_v2, pid, None, format_id, pid, file_ext) @pytest.mark.parametrize( "format_id,file_ext,base_name", [ ("text/tsv", ".tsv", "myfile"), ("video/x-ms-wmv", ".wmv", "my video file"), ( "-//ecoinformatics.org//eml-access-2.0.0beta4//EN", ".xml", "An EML XML file", ), ], ) def test_1020(self, gmn_client_v2, format_id, base_name, file_ext): """SciObj with fileName without extension returns filename generated from fileName and formatId. When formatId is valid, returns filename with extension from objectFormatList. """ pid = d1_test.instance_generator.identifier.generate_pid() self._check(gmn_client_v2, pid, base_name, format_id, base_name, file_ext) def test_1030(self, gmn_client_v2): """SciObj with fileName without extension returns filename generated from fileName and formatId. When formatId is unknown, returns filename with extension, ".data". 
""" pid = d1_test.instance_generator.identifier.generate_pid() self._check(gmn_client_v2, pid, pid, "unknown_format_id", pid, ".data")
4,772
1,477
import copy
import os.path as osp
import tempfile

import pytest
import torch

from mmocr.models import build_detector


def _create_dummy_vocab_file(vocab_file):
    """Write a minimal vocab file: one lowercase ascii letter per line."""
    with open(vocab_file, 'w') as fw:
        for char in list(map(chr, range(ord('a'), ord('z') + 1))):
            fw.write(char + '\n')


def _get_config_module(fname):
    """Load a configuration as a python module."""
    from mmcv import Config
    config_mod = Config.fromfile(fname)
    return config_mod


def _get_detector_cfg(fname):
    """Grab configs necessary to create a detector.

    These are deep copied to allow for safe modification of parameters without
    influencing other tests.
    """
    config = _get_config_module(fname)
    model = copy.deepcopy(config.model)
    return model


@pytest.mark.parametrize(
    'cfg_file', ['configs/ner/bert_softmax/bert_softmax_cluener_18e.py'])
def test_bert_softmax(cfg_file):
    """Smoke-test the BERT-softmax NER detector: train forward with two loss
    configs, then an inference forward under no_grad."""
    # prepare data
    texts = ['中'] * 47
    img = [31] * 47
    labels = [31] * 128
    input_ids = [0] * 128
    attention_mask = [0] * 128
    token_type_ids = [0] * 128
    img_metas = {
        'texts': texts,
        'labels': torch.tensor(labels).unsqueeze(0),
        'img': img,
        'input_ids': torch.tensor(input_ids).unsqueeze(0),
        'attention_masks': torch.tensor(attention_mask).unsqueeze(0),
        'token_type_ids': torch.tensor(token_type_ids).unsqueeze(0)
    }

    # create dummy data
    tmp_dir = tempfile.TemporaryDirectory()
    vocab_file = osp.join(tmp_dir.name, 'fake_vocab.txt')
    _create_dummy_vocab_file(vocab_file)

    model = _get_detector_cfg(cfg_file)
    model['label_convertor']['vocab_file'] = vocab_file

    detector = build_detector(model)
    losses = detector.forward(img, img_metas)
    assert isinstance(losses, dict)

    # rebuild with an alternative loss and check training forward still works
    model['loss']['type'] = 'MaskedFocalLoss'
    detector = build_detector(model)
    losses = detector.forward(img, img_metas)
    assert isinstance(losses, dict)

    # NOTE(review): tmp_dir (and the vocab file) is removed before the
    # inference forward below; this assumes the detector loaded the vocab at
    # build time and does not re-read the file — confirm.
    tmp_dir.cleanup()

    # Test forward test
    with torch.no_grad():
        batch_results = []
        result = detector.forward(None, img_metas, return_loss=False)
        batch_results.append(result)
2,159
788
class Cita:
    """A medical appointment (cita).

    Holds the requester (solicitante), date, time, reason (motivo), current
    state (estado), and the assigned doctor. ``id`` is read-only — it has a
    getter but intentionally no setter; every other field can be updated.
    """

    def __init__(self, id, solicitante, fecha, hora, motivo, estado, doctor):
        # Store every constructor argument under an attribute of the same name.
        nombres = ("id", "solicitante", "fecha", "hora", "motivo", "estado", "doctor")
        valores = (id, solicitante, fecha, hora, motivo, estado, doctor)
        for nombre, valor in zip(nombres, valores):
            setattr(self, nombre, valor)

    # --- read accessors -------------------------------------------------
    def getId(self):
        return self.id

    def getSolicitante(self):
        return self.solicitante

    def getFecha(self):
        return self.fecha

    def getHora(self):
        return self.hora

    def getMotivo(self):
        return self.motivo

    def getEstado(self):
        return self.estado

    def getDoctor(self):
        return self.doctor

    # --- write accessors (id deliberately has none) ---------------------
    def setSolicitante(self, solicitante):
        self.solicitante = solicitante

    def setFecha(self, fecha):
        self.fecha = fecha

    def setHora(self, hora):
        self.hora = hora

    def setMotivo(self, motivo):
        self.motivo = motivo

    def setEstado(self, estado):
        self.estado = estado

    def setDoctor(self, doctor):
        self.doctor = doctor
1,106
374
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models/layers/models.layers.graph.ipynb (unless otherwise specified). __all__ = ['FiGNN_Layer', 'GraphLayer'] # Cell import torch import torch.nn as nn import torch.nn.functional as F from itertools import product # Cell class FiGNN_Layer(nn.Module): def __init__(self, num_fields, embedding_dim, gnn_layers=3, reuse_graph_layer=False, use_gru=True, use_residual=True, device=None): super(FiGNN_Layer, self).__init__() self.num_fields = num_fields self.embedding_dim = embedding_dim self.gnn_layers = gnn_layers self.use_residual = use_residual self.reuse_graph_layer = reuse_graph_layer self.device = device if reuse_graph_layer: self.gnn = GraphLayer(num_fields, embedding_dim) else: self.gnn = nn.ModuleList([GraphLayer(num_fields, embedding_dim) for _ in range(gnn_layers)]) self.gru = nn.GRUCell(embedding_dim, embedding_dim) if use_gru else None self.src_nodes, self.dst_nodes = zip(*list(product(range(num_fields), repeat=2))) self.leaky_relu = nn.LeakyReLU(negative_slope=0.01) self.W_attn = nn.Linear(embedding_dim * 2, 1, bias=False) def build_graph_with_attention(self, feature_emb): src_emb = feature_emb[:, self.src_nodes, :] dst_emb = feature_emb[:, self.dst_nodes, :] concat_emb = torch.cat([src_emb, dst_emb], dim=-1) alpha = self.leaky_relu(self.W_attn(concat_emb)) alpha = alpha.view(-1, self.num_fields, self.num_fields) mask = torch.eye(self.num_fields).to(self.device) alpha = alpha.masked_fill(mask.byte(), float('-inf')) graph = F.softmax(alpha, dim=-1) # batch x field x field without self-loops return graph def forward(self, feature_emb): g = self.build_graph_with_attention(feature_emb) h = feature_emb for i in range(self.gnn_layers): if self.reuse_graph_layer: a = self.gnn(g, h) else: a = self.gnn[i](g, h) if self.gru is not None: a = a.view(-1, self.embedding_dim) h = h.view(-1, self.embedding_dim) h = self.gru(a, h) h = h.view(-1, self.num_fields, self.embedding_dim) else: h = a + h if self.use_residual: h += 
feature_emb return h # Cell class GraphLayer(nn.Module): def __init__(self, num_fields, embedding_dim): super(GraphLayer, self).__init__() self.W_in = torch.nn.Parameter(torch.Tensor(num_fields, embedding_dim, embedding_dim)) self.W_out = torch.nn.Parameter(torch.Tensor(num_fields, embedding_dim, embedding_dim)) nn.init.xavier_normal_(self.W_in) nn.init.xavier_normal_(self.W_out) self.bias_p = nn.Parameter(torch.zeros(embedding_dim)) def forward(self, g, h): h_out = torch.matmul(self.W_out, h.unsqueeze(-1)).squeeze(-1) # broadcast multiply aggr = torch.bmm(g, h_out) a = torch.matmul(self.W_in, aggr.unsqueeze(-1)).squeeze(-1) + self.bias_p return a
3,333
1,134
#!venv/bin/python
# coding=UTF-8
# -*- coding: UTF-8 -*-
# vim: set fileencoding=UTF-8 :
"""
Double-deck bid euchre

Implementation is similar to the rules given by Craig Powers
https://www.pagat.com/euchre/bideuch.html

Notable differences (to match how I learned in high school calculus) include:
* Minimum bid of 6 (which can be stuck to the dealer)
* Shooters and loners are separate bids (guessing as ±18 for shooter, similar to a loner)
* Shooters are a mandatory 2 card exchange with your partner
* Trump isn't announced until after bidding has concluded
* Winner of bid leads the first hand
* Winning your bid gives you (tricks earned + 2) points

Mothjab is a funny word with no current meaning.
"""
from cardstock import *

debug: Optional[bool] = False
o: Optional[TextIO] = None  # game log file; None means echo to stdout only
log_dir: str = game_out_dir(os.path.basename(__file__).split(".py")[0])


def p(msg):
    """Echo a message to the game log file (or stdout when no log is open)."""
    global o
    click.echo(msg, o)


def px(msg) -> None:
    """Echo only when debug mode is enabled."""
    global debug
    if debug:
        p(msg)


class EuchrePlayer(BasePlayer, abc.ABC):
    """Shared euchre-player behavior: bid estimates, trump marking, shooters."""

    desired_trump: Bid

    def __init__(self, g: "GameType", /, name: str, is_bot: int = 1, **kwargs):
        super().__init__(g, name, is_bot)
        self.tricks: int = 0
        self.bid_estimates: Dict[Bid, int] = {}
        self.reset_bids()

    def reset_bids(self) -> None:
        """Zero the per-trump-call estimates before a new hand."""
        for t in Bid:
            self.bid_estimates[t] = 0

    @property
    def shoot_strength(self) -> int:
        return self.in_game.shoot_strength

    @property
    def choose_trump(self) -> Bid:
        return self.desired_trump

    @abc.abstractmethod
    def make_bid(
        self,
        valid_bids: List[int],
        min_bid: int = 0,
        leading_player: "Optional[EuchrePlayer]" = None,
    ) -> int:
        pass

    def trumpify_hand(self, trump_suit: Optional[Suit], is_lo: bool = False) -> None:
        """Marks the trump suit and sort the hands"""
        if trump_suit:
            self.hand.trumpify(trump_suit)
        self.sort_hand(is_lo)

    def receive_shooter(self, **kwargs) -> None:
        """Collect the mandatory shooter cards passed from teammates."""
        shot = PassList(
            list(self.teammates),
            directions=[pass_shoot] * self.in_game.shoot_strength,
            specific_destination=cycle([self]),
            sort_low=self.in_game.low_win,
        )
        shot.collect_cards()
        shot.distribute_cards()


class HumanPlayer(BaseHuman, EuchrePlayer):
    def __init__(self, g: "GameType", /, name: str):
        BaseHuman.__init__(self, g, name)
        EuchrePlayer.__init__(self, g, name, 0)

    @property
    def choose_trump(self) -> Bid:
        """Prompt the human for a trump call (full names and short names accepted)."""
        p(self.hand)  # give a closer look at your hand before bidding
        bids: List[str] = [c for c in Bid.__members__]
        bids.extend([Bid[c].short_name for c in Bid.__members__])
        bid: str = click.prompt(
            "Declare Trump",
            type=click.Choice(bids, False),
            show_choices=False,
        ).upper()
        return Bid[[b for b in Bid.__members__ if (bid in b)][0]]

    def make_bid(
        self,
        valid_bids: List[int],
        min_bid: int = 0,
        leading_player: "Optional[EuchrePlayer]" = None,
    ) -> int:
        """Prompt for a bid; "0" is always offered as a pass."""
        self.hand.sort(key=key_display4human)
        p(self.hand)
        return int(
            click.prompt(
                "How much to bid",
                type=click.Choice(
                    ["0"] + [str(x) for x in valid_bids if (x >= min_bid)],
                    False,
                ),
            )
        )


class ComputerPlayer(BaseComputer, EuchrePlayer):
    sort_key = key_trump_power

    def __init__(self, g: "GameType", /, name: str):
        BaseComputer.__init__(self, g, name)
        EuchrePlayer.__init__(self, g, name, 1)

    def make_bid(
        self,
        valid_bids: List[int],
        min_bid: int = 0,
        leading_player: "Optional[EuchrePlayer]" = None,
    ) -> int:
        """Estimate tricks per trump call (cached per hand) and bid accordingly."""
        if max(self.bid_estimates.values()) == 0:
            self.bid_estimates = {
                t: self.simulate_hand(
                    h_p=deepcopy(self.hand),
                    d_p=deepcopy(self.card_count),
                    handedness=self.in_game.handedness,
                    t=t,
                )
                for t in Bid
            }
        # pick the biggest
        # any decisions based on the current winning bid should happen here
        bid: int = max(self.bid_estimates.values())
        self.desired_trump = random.choice(
            [k for k in self.bid_estimates.keys() if (self.bid_estimates[k] == bid)]
        )
        # don't outbid your partner (within reason)
        if leading_player in self.teammates and bid - min_bid < 2:
            return 0
        # can you do it by yourself?
        if bid == len(self.hand) - 1:
            return valid_bids[-2]  # call a shooter
        elif bid == len(self.hand):
            return valid_bids[-1]  # call a loner
        # don't bid outrageously if you don't have to
        # count on two tricks from your partner
        # NOTE(review): this ADDS the teammates' expected tricks to the bid,
        # which raises it — confirm this shouldn't be a subtraction given the
        # "don't bid outrageously" comment above.
        return bid + self.shoot_strength * len(self.teammates)

    def pick_card(self, valid_cards: Hand, **kwargs,) -> Card:
        """Choose a card: lead winners when leading, otherwise win cheaply or
        dump junk depending on who currently holds the trick."""
        tp: Trick = kwargs.get("trick_in_progress")
        is_low: bool = kwargs.get("is_low")
        unplayed: Hand = self.card_count
        broken: Dict[Suit, Union[Team, None, bool]] = self.in_game.suit_safety
        # TODO be less stupid with large games (>4 players)

        def winning_leads(ss: List[Suit], st: bool = True) -> List[Card]:
            # Cards in the given suits estimated to win if led.
            wl: List[Card] = []
            for s in ss:
                wl.extend(
                    self.estimate_tricks_by_suit(
                        follow_suit(s, valid_cards, True),
                        follow_suit(s, unplayed, True),
                        is_low,
                        strict=st,
                    )
                )
            return wl

        if not tp:  # you have the lead
            safer_suits: List[Suit] = [
                s for s in broken.keys() if broken[s] is False or broken[s] == self.team
            ] if broken else suits
            w: List[Card] = []
            if safer_suits:  # unbroken suits to lead aces
                px("Checking suits")
                w += winning_leads(safer_suits)
            else:  # lead with good trump
                px("Leading with a good trump")
                w += winning_leads([Suit.TRUMP])
            if not w:  # try a risky ace
                px("Risky bet")
                w += winning_leads(suits, st=bool(self.teammates))
            if not w and self.teammates:  # seamless passing of the lead
                is_low = not is_low
                w += winning_leads(suits + [Suit.TRUMP], st=False)
                px("Lead pass")
            if not w:  # YOLO time
                px("YOLO")
                return random.choice(valid_cards)
            px(w)
            return random.choice(w)

        # you don't have the lead
        # win if you can (and the current lead isn't on your team)
        # play garbage otherwise
        junk_ranks: Set[Rank] = (
            {Rank.ACE_HI, Rank.KING} if is_low else {Rank.NINE, Rank.TEN, Rank.JACK}
        ) | {Rank.QUEEN}
        wc, wp = tp.winner(is_low)
        w = Hand(c for c in valid_cards if c.beats(wc, is_low))
        junk_cards = Hand(h for h in valid_cards if h not in w)
        if w:  # you have something that can win
            if wp in self.teammates and junk_cards:  # your partner is winning
                if wc.rank in junk_ranks:  # but their card is rubbish
                    return random.choice(w)
                return random.choice(junk_cards)
            return random.choice(w)
        return random.choice(junk_cards)

    def simulate_hand(self, *, h_p: Hand, d_p: Hand, t: Bid, **kwargs) -> int:
        """Estimate total tricks this hand could take if `t` were called."""

        def slice_by_suit(h: Hand, s: Suit) -> Hand:
            # Cards of suit s, sorted best-first for the proposed trump call.
            return follow_suit(
                s,
                sorted(
                    h.trumpify(t.trump_suit),
                    key=key_trump_power,
                    reverse=not t.is_low,
                ),
                strict=True,
                ok_empty=True,
            )

        return sum(
            [
                len(
                    self.estimate_tricks_by_suit(
                        my_suit=slice_by_suit(h_p, s),
                        mystery_suit=slice_by_suit(d_p, s),
                        is_low=t.is_low,
                        is_trump=(s == Suit.TRUMP),
                    )
                )
                for s in suits + [Suit.TRUMP]
            ]
        )

    @staticmethod
    def estimate_tricks_by_suit(
        my_suit: Iterable[Card],
        mystery_suit: Iterable[Card],
        is_low: bool,
        is_trump: Optional[bool] = False,
        strict: bool = False,
    ) -> Hand:
        """
        Slices up your hand and unplayed cards to estimate which suit has the most potential

        :param my_suit: list of your cards presumed of the same suit
        :param mystery_suit: unplayed cards of the suit
        :param is_low: lo no?
        :param is_trump: unused
        :param strict: True to pick a trick, False to estimate total tricks in a hand
        :return: winning cards for the suit
        """
        est = Hand()
        for rank in (
            euchre_ranks
            if is_low
            else [Rank.RIGHT_BOWER, Rank.LEFT_BOWER] + list(reversed(euchre_ranks))
        ):
            me: List[Card] = match_by_rank(my_suit, rank)
            oth: List[Card] = match_by_rank(mystery_suit, rank)
            # p(f"{me} {rank} {oth}")  # debugging
            est.extend(me)
            if oth and (strict or not me and not strict):
                break  # there are mystery cards that beat your cards
        return est


class Team(BaseTeam, MakesBid, WithScore):
    """A scoring team: tracks the running bid/trick history for the game log."""

    def __init__(self, players: Iterable[BasePlayer]):
        BaseTeam.__init__(self, players)
        MakesBid.__init__(self)
        WithScore.__init__(self)
        self.bid_history: List[str] = []
        self.tricks_taken: List[int] = []

    def hand_tab(self, hand: Optional[int], tab: str = "\t") -> str:
        """One tab-joined log row: a specific hand, or totals when hand is None."""
        return tab.join(
            [
                str(self.bid_history[hand]),
                str(self.tricks_taken[hand]),
                str(self.score_changes[hand]),
            ]
            if hand is not None
            else [
                str(sum([1 for b in self.bid_history if b != str(None)])),
                str(sum(self.tricks_taken)),
                str(self.score),
            ]
        )


class BidEuchre(BaseGame):
    def __init__(self, *, minimum_bid: int = 6, **kwargs):
        """
        A game of bid euchre

        :param minimum_bid: minimum bid that will get stuck to the dealer
        :param kwargs: things to pass along to BaseGame
        """
        # setup for the super() call
        if not kwargs.get("deck_replication"):
            kwargs["deck_replication"] = 2
        if not kwargs.get("team_size"):
            kwargs["team_size"] = (
                2 if (h := kwargs.get("handedness")) and not (h % 2) else 1
            )
        if kwargs.get("pass_size") is None:
            kwargs["pass_size"] = 2
        if kwargs.get("minimum_kitty_size") is None:
            kwargs["minimum_kitty_size"] = 0
        if not kwargs.get("minimum_hand_size"):
            kwargs["minimum_hand_size"] = 8
        super().__init__(
            human_player_type=HumanPlayer,
            computer_player_type=ComputerPlayer,
            team_type=Team,
            game_name="Euchre",
            deck_generator=make_euchre_deck,
            **kwargs,
        )
        self.trump: Optional[Suit] = None
        self.low_win: bool = False

        # set the bidding
        c = configparser.ConfigParser()
        c.read("constants.cfg")
        minimum_bid: int = minimum_bid if minimum_bid else (
            6 if self.handedness == 3 else (self.hand_size // 2)
        )
        # Shooter (1.5x) and loner (2x) bids only exist when teams are real.
        self.valid_bids: List[int] = [
            i for i in range(minimum_bid, self.hand_size + 1)
        ] + (
            [round(self.hand_size * 1.5), self.hand_size * 2]
            if len(self.teams) != len(self.players)
            else []
        )
        if (
            self.victory_threshold is not None and self.victory_threshold > 0
        ):  # negative thresholds get dunked on
            self.mercy_rule: int = -self.victory_threshold
            self.bad_ai_end: int = -self.victory_threshold // 2
        else:
            self.victory_threshold: int = c["Scores"].getint("victory")
            self.mercy_rule: int = c["Scores"].getint("mercy")
            self.bad_ai_end: int = c["Scores"].getint("broken_ai")

    @property
    def shoot_strength(self) -> int:
        """Alias so I don't break existing code"""
        return self.pass_size

    def bidding(self, bid_order: List[EuchrePlayer]) -> EuchrePlayer:
        """Run the auction; returns the winning bidder (their team.bid is set)."""
        first_round: bool = True
        count: int = 1
        hands: int = len(bid_order)
        wp: Optional[EuchrePlayer] = None
        wb: int = 0
        bid_order = cycle(bid_order)
        min_bid: int = min(self.valid_bids)
        max_bid: int = max(self.valid_bids)
        for pl in bid_order:
            # everyone has passed
            if count == hands:
                if first_round:  # stuck the dealer
                    wb = min_bid
                    p(f"Dealer {pl} got stuck with {min_bid}")
                    if pl.is_bot:  # dealer picks suit
                        pl.make_bid(self.valid_bids, min_bid, pl)
                    wp = pl
                else:  # someone won the bid
                    wb = min_bid - 1
                break

            # end bidding early for a loner
            if min_bid > max_bid:
                wb = max_bid
                break

            # get the bid
            bid: int = pl.make_bid(self.valid_bids, min_bid, wp)

            # player passes
            if bid < min_bid:
                p(f"{pl} passes")
                count += 1
                continue

            # bid successful
            min_bid = bid + 1
            wp = pl
            count = 1
            first_round = False
            p(f"{pl} bids {bid}")
        # wp is always set here: either a player bid or the dealer got stuck.
        wp.team.bid = wb
        return wp

    def play_hand(self, dealer: EuchrePlayer) -> EuchrePlayer:
        """Deal, auction, play out one hand, and score it; returns next dealer."""
        self.deal()
        hn: int = len(dealer.team.score_changes) + 1
        p(f"\nHand {hn}")
        p(f"Dealer: {dealer}")
        po: List[EuchrePlayer] = get_play_order(dealer)
        po.append(po.pop(0))  # because the dealer doesn't lead bidding

        # deal the cards
        for pl in po:
            pl.tricks = 0
            pl.reset_bids()

        # bidding
        lead: EuchrePlayer = self.bidding(po)

        # declare Trump
        trump: Bid = lead.choose_trump
        p(trump)
        self.low_win = trump.is_low
        p(f"{lead} bid {lead.team.bid} {trump.name}\n")

        # modify hands if trump called
        [player.trumpify_hand(trump.trump_suit, trump.is_low) for player in po]
        self.unplayed_cards.trumpify(trump.trump_suit)  # for card-counting
        self.suit_safety[trump.trump_suit] = None

        # check for shooters and loners
        lone: Optional[EuchrePlayer] = None
        if lead.team.bid > self.hand_size:
            if lead.team.bid < 2 * self.hand_size:
                lead.receive_shooter()
            lone = lead

        # play the tricks
        for _ in range(self.hand_size):
            lead = self.play_trick(lead, trump.is_low, lone)

        # calculate scores
        p(f"Hand {hn} scores:")
        for t in self.teams:
            tr_t: int = 0
            ls: int = 0
            bid: int = t.bid
            for pl in t.players:
                tr_t += pl.tricks
            if bid:
                # loners and shooters
                if lone:
                    ls = bid
                    bid = self.hand_size
                if tr_t < bid:
                    p(f"{t} got Euchred and fell {bid - tr_t} short of {bid}")
                    t.score = -bid if not ls else -bid * 3 // 2
                elif ls:
                    p(f"{lone} won all alone, the absolute madman!")
                    t.score = ls
                else:
                    p(f"{t} beat their bid of {bid} with {tr_t} tricks")
                    t.score = tr_t + 2
            else:  # tricks the non-bidding team earned
                p(f"{t} earned {tr_t} tricks")
                t.score = tr_t

            # bookkeeping
            t.bid_history.append(
                f"{ls if ls else bid} {trump.name}" if bid else str(None)
            )
            t.tricks_taken.append(tr_t)
            p(f"{t}: {t.score}")
            t.bid = 0  # reset for next time
        return dealer.next_player

    def play_trick(
        self,
        lead: EuchrePlayer,
        is_low: bool = False,
        lone: Optional[EuchrePlayer] = None,
    ) -> EuchrePlayer:
        """Play one trick starting at `lead`; returns the winner (next leader)."""
        pl: EuchrePlayer = lead
        po: List[EuchrePlayer] = get_play_order(lead)
        trick_in_progress: Trick = Trick()

        # play the cards
        for pl in po:
            if lone and pl in lone.teammates:
                continue  # a loner's partners sit out
            c: Card = pl.play_card(
                trick_in_progress,
                handedness=self.handedness,
                is_low=is_low,
                broken_suits=self.suit_safety,
                trump=self.trump,
            )
            trick_in_progress.append(TrickPlay(c, pl))
            p(f"{pl.name} played {repr(c)}")

        # find the winner
        w: TrickPlay = trick_in_progress.winner(is_low)
        w.played_by.tricks += 1
        p(f"{w.played_by.name} won the trick\n")
        # record that the lead suit was trumped/discarded on (for card-counting)
        l_suit: Suit = trick_in_progress.lead_suit
        if w.card.suit != l_suit:
            self.suit_safety[l_suit] = (
                True if self.suit_safety[l_suit] else w.played_by.team
            )
        return w.played_by

    def write_log(self, ld: str, splitter: str = "\t|\t") -> None:
        """Dump the per-hand bid/trick/score table to a .gamelog file in `ld`."""
        stop_time: str = str(datetime.now()).split(".")[0]
        f: TextIO = open(os.path.join(ld, f"{self.start_time}.gamelog"), "w")
        t_l: List[Team] = list(self.teams)  # give a consistent ordering

        def w(msg):
            click.echo(msg, f)

        # headers
        w(splitter.join([self.start_time] + [f"{t}\t\t" for t in t_l]))
        w(splitter.join([""] + ["Bid\tTricks Taken\tScore Change" for _ in t_l]))
        w(splitter.join(["Hand"] + ["===\t===\t===" for _ in t_l]))
        w(  # body
            "\n".join(
                [
                    splitter.join([f"{hand + 1}"] + [t.hand_tab(hand) for t in t_l])
                    for hand in range(len(t_l[0].bid_history))
                ]
            )
        )
        # totals
        w(splitter.join([stop_time] + ["===\t===\t===" for _ in t_l]))
        w(splitter.join(["Totals"] + [t.hand_tab(None) for t in t_l]))
        f.close()

    def victory_check(self) -> Tuple[int, Optional[Team]]:
        """Return (status, team): 1 win, -1 mercy loss, -2 broken AI, 0 keep playing."""
        scorecard: List[Team] = sorted(self.teams, key=score_key)
        best_score: int = scorecard[-1].score
        if best_score < self.bad_ai_end:
            return -2, None  # everyone went too far negative
        if best_score == scorecard[-2].score:
            return 0, None  # keep playing for a tie
        if best_score > self.victory_threshold:  # a team won
            return 1, scorecard[-1]
        if scorecard[0].score < self.mercy_rule:  # a team lost
            return -1, scorecard[0]  # should never tie for last
        return 0, None

    def play(self) -> None:
        """Main loop: play hands until victory_check says the game is over."""
        v: Tuple[int, Optional[Team]] = self.victory_check()
        global o
        while v[0] == 0:
            self.current_dealer = self.play_hand(self.current_dealer)
            v = self.victory_check()

        def final_score(pf: Callable = print):
            pf(f"\nFinal Scores")
            for t in self.teams:
                pf(f"{t}: {t.score}")
            pf(f"({len(self.current_dealer.team.bid_history)} hands)")

        final_score(p)
        if o:  # final scores to terminal
            final_score()


def score_key(t: Team) -> int:
    """Sort key for ranking teams by score."""
    return t.score


@click.command()
@common_options
@click.option(
    "--minimum-bid",
    type=click.IntRange(0, None),
    help="The minimum bid (will usually be 6 if not set)",
)
def main(**kwargs):
    """CLI entry point; all-bot games also open a .gameplay log and enable debug."""
    global o
    global debug
    global log_dir
    if kwargs.get("all_bots"):
        st: str = str(datetime.now()).split(".")[0]
        o = open(os.path.join(log_dir, f"{st}.gameplay"), "w")
        kwargs["start_time"] = st
        debug = True
    make_and_play_game(BidEuchre, log_dir, **kwargs)


if __name__ == "__main__":
    Path(log_dir).mkdir(parents=True, exist_ok=True)
    main()
20,638
6,716
# coding: utf-8

from __future__ import absolute_import
from datetime import date, datetime  # noqa: F401

from typing import List, Dict  # noqa: F401

from swagger_server.models.base_model_ import Model
from swagger_server import util


class ErrorResponse(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    def __init__(self, detail: str = None, status: float = None, title: str = None, type: str = None):  # noqa: E501
        """ErrorResponse - a model defined in Swagger

        :param detail: The detail of this ErrorResponse.  # noqa: E501
        :type detail: str
        :param status: The status of this ErrorResponse.  # noqa: E501
        :type status: float
        :param title: The title of this ErrorResponse.  # noqa: E501
        :type title: str
        :param type: The type of this ErrorResponse.  # noqa: E501
        :type type: str
        """
        # Standard swagger-codegen metadata: attribute name -> declared type,
        # and attribute name -> JSON key (used by util.deserialize_model).
        self.swagger_types = {
            'detail': str,
            'status': float,
            'title': str,
            'type': str
        }

        self.attribute_map = {
            'detail': 'detail',
            'status': 'status',
            'title': 'title',
            'type': 'type'
        }

        self._detail = detail
        self._status = status
        self._title = title
        self._type = type

    @classmethod
    def from_dict(cls, dikt) -> 'ErrorResponse':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The ErrorResponse of this ErrorResponse.  # noqa: E501
        :rtype: ErrorResponse
        """
        return util.deserialize_model(dikt, cls)

    @property
    def detail(self) -> str:
        """Gets the detail of this ErrorResponse.

        エラーメッセージ  # noqa: E501

        :return: The detail of this ErrorResponse.
        :rtype: str
        """
        return self._detail

    @detail.setter
    def detail(self, detail: str):
        """Sets the detail of this ErrorResponse.

        エラーメッセージ  # noqa: E501

        :param detail: The detail of this ErrorResponse.
        :type detail: str
        """
        # Generated None-check: `detail` is a required property in the schema.
        if detail is None:
            raise ValueError("Invalid value for `detail`, must not be `None`")  # noqa: E501

        self._detail = detail

    @property
    def status(self) -> float:
        """Gets the status of this ErrorResponse.

        HTTPステータスコード  # noqa: E501

        :return: The status of this ErrorResponse.
        :rtype: float
        """
        return self._status

    @status.setter
    def status(self, status: float):
        """Sets the status of this ErrorResponse.

        HTTPステータスコード  # noqa: E501

        :param status: The status of this ErrorResponse.
        :type status: float
        """
        # Generated None-check: `status` is a required property in the schema.
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501

        self._status = status

    @property
    def title(self) -> str:
        """Gets the title of this ErrorResponse.

        タイトル  # noqa: E501

        :return: The title of this ErrorResponse.
        :rtype: str
        """
        return self._title

    @title.setter
    def title(self, title: str):
        """Sets the title of this ErrorResponse.

        タイトル  # noqa: E501

        :param title: The title of this ErrorResponse.
        :type title: str
        """
        # No None-check generated: `title` is optional in the schema.
        self._title = title

    @property
    def type(self) -> str:
        """Gets the type of this ErrorResponse.

        タイプ  # noqa: E501

        :return: The type of this ErrorResponse.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type: str):
        """Sets the type of this ErrorResponse.

        タイプ  # noqa: E501

        :param type: The type of this ErrorResponse.
        :type type: str
        """
        # No None-check generated: `type` is optional in the schema.
        self._type = type
3,908
1,195
#!/usr/bin/env python # coding: utf-8 from dh import dh_solver #from IPython.display import Latex import sympy from sympy import Symbol import numpy as np #create an object mtb = dh_solver() # ## adding the dh paramters # just use obj.add() method to add a set of dh paramters in the kinematic chain, the first one to add is the base and the last one is the end effector, the method takes input a list \[ d, theta, a, alpha\] # # you can get the parameters from vrep http://www.forum.coppeliarobotics.com/viewtopic.php?f=9&t=5367 # In[3]: #you can add the paramters wiht the variable as string mtb.add([0,Symbol("theta1"),0.467,0]) #or you can add the variable as a Sympy symbol, in this case you can also shift the variable mtb.add([0,Symbol("theta2")+sympy.pi/2,0.4005,0]) mtb.add([0.2,sympy.pi/3,0,Symbol("alpha3")]) #to get the dh matrices in symbolic form T = mtb.calc_symbolic_matrices() print(T) #simplifing T sympy.simplify(T) #to get the intermediate the transormation matrices print(mtb.T_list) #to get the matrix with the constants substituted T1 = mtb.calc_dh_matrix() T2 = sympy.simplify(T1) print(T2) #printing T2 in latex # a = sympy.latex(T2) # print(a) #to substitute with the variables and return a numpy array of floats, all variables must be subistituted arr = mtb.get_numpy_matrix([ ["theta1", sympy.pi/2], ["theta2", sympy.pi/3] ,["alpha3", 0.5]]) print(arr) # to call obj.get_numpy_matrix() you have to at least have called obj.calc_dh_matrix() and of coarse added your parameters :D
1,534
572
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from pants.testutil.pants_integration_test import run_pants


def test_goals() -> None:
    """`goals` succeeds and mentions the help hint plus a few core goals."""
    result = run_pants(["goals"])
    result.assert_success()
    assert "to get help for a particular goal" in result.stdout
    # Spot check a few core goals.
    for expected_goal in ("filedeps", "list", "roots", "validate"):
        assert expected_goal in result.stdout


def test_only_show_implemented_goals() -> None:
    """Goals needing a downstream implementation appear only when one is registered."""
    # Some core goals, such as `./pants test`, require downstream implementations to work
    # properly. We should only show those goals when an implementation is provided.
    needs_implementation = ("binary", "fmt", "lint", "run", "test")
    base_command = ["--pants-config-files=[]", "goals"]

    bare_run = run_pants(["--backend-packages=[]", *base_command])
    bare_run.assert_success()
    assert not any(goal in bare_run.stdout for goal in needs_implementation)

    backed_run = run_pants(
        [
            "--backend-packages=['pants.backend.python', 'pants.backend.python.lint.isort']",
            *base_command,
        ],
    )
    backed_run.assert_success()
    assert all(goal in backed_run.stdout for goal in needs_implementation)


def test_ignored_args() -> None:
    """Extra flags (some formerly meaningful) after `goals` are ignored, not rejected."""
    # Test that arguments (some of which used to be relevant) are ignored.
    result = run_pants(["goals", "--all", "--graphviz", "--llama"])
    result.assert_success()
    assert "to get help for a particular goal" in result.stdout
1,646
521
def extract_smiles():
    """Return a fixed list of example SMILES strings (duplicates preserved)."""
    molecules = ("c1ccccc1", "Cc1ccccc1", "c1ccccc1", "CCO")
    return list(molecules)
78
37
""" A Python Class A simple Python graph class to do essential operations into graph. """ import operator import math from random import choice from collections import defaultdict import networkx as nx class ProA(): def __init__(self, graph): """ Initializes util object. """ self.__graph = graph self.__relations = {} self.__relations_distribution = defaultdict(int) self.__hits1 = 0.0 self.__hits3 = 0.0 self.__hits5 = 0.0 self.__hits10 = 0.0 def clear(self): """ Clear current graph """ self.__graph.clear() def set_graph(self, graph): """ A method to set graph. """ self.__graph = graph def get_graph(self): """ A method to get graph. """ return self.__graph def get_hits1(self): """ A method to get hits1. """ return self.__hits1 def get_hits3(self): """ A method to get hits3. """ return self.__hits3 def get_hits5(self): """ A method to get hits5. """ return self.__hits5 def get_hits10(self): """ A method to get hits10. """ return self.__hits10 def set_relation(self, source, target, relation): """ A method to set an edge label. """ self.__relations[(source,target)] = relation def get_relation(self, source, target): """ A method to return an edge label. """ try: return self.__relations[(source,target)] except KeyError: try: return self.__relations[(target,source)] except KeyError: pass def get_domain(self, source): """ Get domain from outgoings relations from source vertex. """ try: dicti = defaultdict(int) for neighbor in self.__graph.neighbors(source): relation = self.get_relation(source, neighbor).split('/') dicti[relation[1]] += 1 sorted_dicti = sorted(dicti.items(), key=operator.itemgetter(1)) return sorted_dicti[0][0] except IndexError: pass def generate_distribution(self, source, target, length): """ Generate relations distribution from a source to target. 
""" paths = nx.all_simple_paths(self.__graph, source, target, cutoff=length) paths = list(paths) print 'len', len(paths) distribution = defaultdict(int) for path in paths: relations_list = list() for i in range(0, len(path) - 1): # print path[i], path[i + 1], self.get_relation(path[i], path[i+1]) relations_list.append(self.get_relation(path[i], path[i+1])) # print 'list', relations_list distribution[tuple(relations_list)] += 1 return distribution def recur_generate_paths(self, g, node_initial, node_source, node_target, distribution, key, index, dicti, source, target): """ Recursive method do generate dictionary from exists edges between v1 and v2 until the limit passed. """ if key[index] == self.get_relation(node_source, node_target): index = index + 1 if len(key) > index: for neighbor in g.neighbors(node_target): self.recur_generate_paths(g, node_initial, node_target, neighbor, distribution, key, index, dicti, source, target) else: if source == node_initial and target == node_target: pass else: dicti[self.get_relation(node_initial, node_target)] += 1 def generate_edges_between_paths(self, distribution, source, target): """ Generate dictionary from exists edges between v1 and v2. """ path_distribution = {} g = self.get_graph() for key, value in distribution.iteritems(): print '-------- Calculating: ', key,'---------' dicti = defaultdict(int) for edge in g.edges(): try: self.recur_generate_paths(g, edge[0], edge[0], edge[1], distribution, key, 0, dicti, source, target) except IndexError: pass path_distribution[key] = dicti return path_distribution def generate_final_distribution(self, distribution, distribution_path): """ Generate final distribution from possible edges. 
""" total_edges = float(sum(distribution.values())) final_path_distribution = defaultdict(float) for dist in distribution: final_path_distribution[dist] += float(distribution[dist])/total_edges final_distribution = defaultdict(float) for path in distribution_path: temp_total = 0 for path2 in distribution_path[path]: temp_total += distribution_path[path][path2] for path2 in distribution_path[path]: final_distribution[path2] += (float(distribution_path[path][path2])/temp_total)*final_path_distribution[path] return final_distribution def evaluate(self, MMR, final_distribution_sorted, edge_to_be_predicted): """ Evaluate MMR. """ count = 0.0 for relation, probability in final_distribution_sorted: print 'Predicting', relation if relation == edge_to_be_predicted: count += 1.0 break if relation == None and probability > 0.92: count += 1.0 elif relation != None: count += 1.0 if count == 0: count = 20.0 else: MMR += (1.0/count) self.update_hits(count) return MMR def update_hits(self, count): """ Evaluate Hits. """ if count == 1: self.__hits1 += 1 if count <= 3: self.__hits3 += 1 if count <= 5: self.__hits5 += 1 if count <= 10: self.__hits10 += 1 def calculate_entropy(self, source, target): """ Calculates the entropy from source and target. """ prod = 1.0 for i in range(1, self.__graph.degree(target)+1): prod = prod * (float(self.__graph.number_of_edges()-self.__graph.degree(source)-i+1)/float(self.__graph.number_of_edges()-i+1)) return -math.log(1 - prod, 2) def calculate_common_neighbors(self, source, target): """ Calculates the common neighbors from source and target. """ return sorted(nx.common_neighbors(self.__graph, source, target)) def calculate_resource_allocation(self, source, target): """ Calculates the common neighbors from source and target. """ return nx.resource_allocation_index(self.__graph, [(source, target)]) def random_walk(self): """ A method to get started a random walk into graph selecting a node from random. 
""" print 'Number of nodes', self.__graph.number_of_nodes() print 'Number of edges', self.__graph.number_of_edges() # Get a node randomly # Probability to get this first node is 1/N seed = choice(self.__graph.nodes()) print 'Selected a node randomly', seed print 'Degree', self.__graph.degree(seed) print 'In degree', self.__graph.in_degree(seed) print 'Out degree', self.__graph.out_degree(seed) print 'Successors', self.__graph.successors(seed) num_edges = len(self.__graph.edges()) prob_vertex = {} entropy_vertex = {} for possibility in self.__graph.nodes(): if possibility != seed: if possibility not in self.__graph.successors(seed): prod = 1.0 for i in range(self.__graph.degree(possibility)): prod = prod * ((num_edges-self.__graph.degree(seed)+(-i+1)+1)/float(num_edges+(-i+1)+1)) prob_vertex[possibility] = 1 - prod entropy_vertex[possibility] = -math.log(1 - prod) prob_vertex = sorted(prob_vertex.items(), key=operator.itemgetter(1)) entropy_vertex = sorted(entropy_vertex.items(), key=operator.itemgetter(1)) print entropy_vertex print seed # Print edges with relation # print DG.edges(data='relation') def entropy(self, source, target): """ A method to get started entropy calculation into graph selecting a node. """ print('source:', source, 'target:', target, 'entropy:', self.calculate_entropy(source, target)) def predict_facts(self, source, target, length): """ A method to predict facts based on shannon entropy. 
""" print(source, target) print 'Selected a node', source print 'Source Degree', self.__graph.degree(source) print 'Neighbors', self.__graph.neighbors(source) print 'Target Degree', self.__graph.degree(target) print 'Neighbors', self.__graph.neighbors(target) # print(sorted(nx.all_neighbors(self.__graph, source))) print(len(self.__graph.edges())) # print(self.__graph.edges()) count = 0.0 for edge in self.__graph.edges(): if edge[0] == 'teamplayssport' or edge[1] == 'teamplayssport': count = count + 1 # print(edge) # print 'In degree', self.__graph.in_degree(source) # print 'Out degree', self.__graph.out_degree(source) # print 'Successors', self.__graph.successors(source) # print(sorted(nx.common_neighbors(self.__graph, source, target))) print(count) print(count/(len(self.__graph.edges())))
9,903
2,779
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Fix sample paths in an SFZ file.

For every <region> whose sample filename carries no directory prefix, prepend
the directory named after the SFZ file (if it exists). The original file is
backed up once to '<file>.bak' before being rewritten.
"""

import shutil
from os.path import basename, exists, isdir, splitext


def main(args=None):
    """Entry point.

    args: command line arguments; args[0] is the SFZ file to fix.
    Returns 1 on usage error (no filename given), otherwise None (exit 0).
    """
    if not args:
        # Bug fix: the old `sys.argv[1:] or 0` invocation passed the int 0
        # here, which crashed with a TypeError on `args[0]`. Print usage and
        # fail cleanly instead.
        print("usage: {} <file.sfz>".format(basename(__file__)))
        return 1
    # Imported lazily so the usage path above works without the dependency.
    from sfzparser import SFZParser
    fn = args[0]
    bn = splitext(basename(fn))[0]
    parser = SFZParser(fn)
    fixed = False
    for name, sect in parser.sections:
        # fix sample filename without directory prefix
        if name == 'region' and 'sample' in sect and isdir(bn) and '/' not in sect['sample']:
            print("Setting prefix for sample '{}' to '{}'.".format(sect['sample'], bn))
            sect['sample'] = bn + '/' + sect['sample']
            fixed = True
    if fixed:
        # Keep a one-time backup; never overwrite an existing .bak.
        if not exists(fn + '.bak'):
            shutil.copy(fn, fn + '.bak')
        # Bug fix: was open(args[0], ...) — same file, now consistently `fn`.
        with open(fn, 'w') as sfz:
            for name, sect in parser.sections:
                if name == 'comment':
                    sfz.write(sect + '\n')
                else:
                    sfz.write("<{}>\n".format(name))
                    for key, value in sorted(sect.items()):
                        sfz.write(" {}={}\n".format(key, value))
    else:
        print("Nothing to fix.")


if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv[1:]))
1,214
387
"""Kerasの各種モデル。""" # pylint: skip-file # flake8: noqa from . import darknet53, efficientnet, xception
103
50
import unittest

from huobi.rest.client import HuobiRestClient
from huobi.rest.error import (
    HuobiRestiApiError
)
import os
from os.path import join, dirname
from dotenv import load_dotenv

# Load API credentials from the repository-root .env file
# (four directory levels above this test module).
dotenv_path = join(dirname(dirname(dirname(dirname(__file__)))), '.env')
load_dotenv(dotenv_path)


class TestCommonEndpoint(unittest.TestCase):
    """Base class: builds an authenticated client from env credentials.

    Requires ACCESS_KEY and SECRET_KEY in the environment (or .env above);
    setUp raises KeyError when they are missing.
    """

    def setUp(self):
        access_key = os.environ['ACCESS_KEY']
        secret_key = os.environ['SECRET_KEY']
        self.client = HuobiRestClient(
            access_key=access_key,
            secret_key=secret_key)

    def tearDown(self):
        self.client.close()


class TestCommonSymbols(TestCommonEndpoint):
    """Tests for the symbols endpoint."""

    def test_success(self):
        res = self.client.symbols()
        self.assertEqual(res.res.status_code, 200)
        self.assertIn('data', res.data)
        self.assertIsInstance(res.data['data'], list)

    def test_authentication_fail(self):
        # Bogus keys must raise when hitting a private endpoint.
        client = HuobiRestClient(
            access_key='1',
            secret_key='2',
        )
        with self.assertRaises(HuobiRestiApiError):
            client.accounts()


class TestCommonCurrencies(TestCommonEndpoint):
    """Tests for the currencies endpoint and its alias."""

    def test_success(self):
        res = self.client.currencies()
        self.assertEqual(res.res.status_code, 200)

    def test_alias(self):
        # `currencys` mirrors the (misspelled) upstream API name.
        res = self.client.currencys()
        self.assertEqual(res.res.status_code, 200)


class TestCommonTimestamp(TestCommonEndpoint):
    """Tests for the server timestamp endpoint."""

    def test_success(self):
        res = self.client.timestamp()
        self.assertEqual(res.res.status_code, 200)
1,536
494
#
# Copyright 2017 Vitalii Kulanov
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Flask blueprint exposing Gerrit user-account views (list/search, profile,
contact editing, SSH keys, creation)."""

import requests

from flask import Blueprint, current_app, flash, Markup, render_template, \
    request, redirect, url_for

from gerritclient import client
from gerritclient import error as client_error

from gerritviewer import common
from .forms import CreateUserAccountForm, EditContactInfoForm, \
    QueryUserAccountForm

accounts = Blueprint('accounts', __name__)


@accounts.route('/accounts', methods=['GET', 'POST'])
def fetch():
    """List/search Gerrit accounts; POST with the query form runs a search."""
    form = QueryUserAccountForm()
    gerrit_accounts = None
    account_client = client.get_client('account',
                                       connection=common.get_connection())
    try:
        if form.validate_on_submit():
            gerrit_accounts = account_client.get_all(
                form.query_string.data, detailed=form.details.data)
            flash(Markup("Search results for <strong>'{}'</strong>: {}".format(
                form.query_string.data,
                "Nothing Found" if not gerrit_accounts else '')),
                category='note')
    # Connection/HTTP failures are shown to the user, not raised.
    except (requests.ConnectionError, client_error.HTTPError) as error:
        current_app.logger.error(error)
        flash(error, category='error')
    return render_template('accounts/accounts.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           entry_category='accounts',
                           entries=gerrit_accounts,
                           form=form)


@accounts.route('/accounts/<account_id>')
def fetch_single(account_id):
    """Show one account; optional ?action=enable|disable toggles its state."""
    account = {}
    account_client = client.get_client('account',
                                       connection=common.get_connection())
    try:
        account = account_client.get_by_id(
            account_id, detailed=request.args.get('details', False))
        account['is_active'] = account_client.is_active(account_id)
        account['membership'] = account_client.get_membership(account_id)
        action = request.args.get('action')
        if action:
            # Dispatch table: unknown actions raise KeyError (unhandled here).
            account_actions = {'enable': account_client.enable,
                               'disable': account_client.disable}
            account_actions[action](account_id)
            flash(Markup("Account with <strong>ID={}</strong> was "
                         "successfully <strong>{}d</strong>".format(
                             account_id, action)), category='note')
            return redirect(url_for('accounts.fetch_single',
                                    account_id=account_id))
    except (requests.ConnectionError, client_error.HTTPError) as error:
        current_app.logger.error(error)
        flash(error, category='error')
    return render_template('accounts/profile.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           entry_category='accounts',
                           entry_item=account,
                           entry_item_name=account.get('name'))


@accounts.route('/accounts/contact/<account_id>', methods=['GET', 'POST'])
def edit_contact_info(account_id):
    """Edit an account's full name, username and status via form POST."""
    form = EditContactInfoForm()
    account = {}
    account_client = client.get_client('account',
                                       connection=common.get_connection())
    try:
        account = account_client.get_by_id(account_id, detailed=False)
        current_status = get_account_status(account_id)
        if form.validate_on_submit():
            fullname, username = form.fullname.data, form.username.data
            status = form.status.data
            response = {}
            # Only push the fields that actually changed.
            if account.get('name') != fullname:
                response['full name'] = account_client.set_name(account_id,
                                                                fullname)
            if username and account.get('username') != username:
                response['username'] = account_client.set_username(account_id,
                                                                   username)
            if status != current_status:
                response['status'] = account_client.set_status(account_id,
                                                               status) or ''
            if response:
                flash(Markup("The following parameters were successfully "
                             "updated: {0}".format(", ".join(
                                 ":: ".join(_) for _ in response.items()))),
                      category='note')
            return redirect(url_for('accounts.fetch_single',
                                    account_id=account_id))
    except (requests.ConnectionError, client_error.HTTPError) as error:
        current_app.logger.error(error)
        flash(error, category='error')
    return render_template('accounts/contacts.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           entry_category='accounts',
                           entry_item=account,
                           entry_item_name=account.get('name'),
                           form=form)


@accounts.route('/accounts/ssh/<account_id>')
def ssh(account_id):
    """Show the SSH keys registered for an account."""
    account_client = client.get_client('account',
                                       connection=common.get_connection())
    account, ssh_keys = {}, []
    try:
        account = account_client.get_by_id(account_id, detailed=False)
        ssh_keys = account_client.get_ssh_keys(account_id)
    except (requests.ConnectionError, client_error.HTTPError) as error:
        current_app.logger.error(error)
        flash(error, category='error')
    return render_template('accounts/ssh.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           entry_category='accounts',
                           entry_item=account,
                           entry_item_name=account.get('name'),
                           entries=ssh_keys)


@accounts.route('/accounts/create', methods=['GET', 'POST'])
def create():
    """Create a new Gerrit user account from the submitted form."""
    form = CreateUserAccountForm()
    if form.validate_on_submit():
        account_client = client.get_client('account',
                                           connection=common.get_connection())
        # Drop empty optional fields from the creation payload.
        data = {k: v for k, v in (('username', form.username.data),
                                  ('name', form.fullname.data),
                                  ('email', form.email.data)) if v}
        try:
            response = account_client.create(form.username.data, data=data)
            msg = Markup("A new user account '<strong>{0}</strong>' "
                         "with ID={1} was successfully created.".format(
                             response['username'], response['_account_id']))
            flash(msg, category='note')
            return redirect(url_for('accounts.fetch_single',
                                    account_id=response['_account_id']))
        except (requests.ConnectionError, client_error.HTTPError) as error:
            current_app.logger.error(error)
            flash(error, category='error')
    return render_template('accounts/create.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           form=form)


# Status of account is only available since gerrit 2.14,
# so we have to fetch it in a proper way for all versions
def get_account_status(account_id):
    """Return the account status string, or None on pre-2.14 servers."""
    account_client = client.get_client('account',
                                       connection=common.get_connection())
    try:
        current_status = account_client.get_status(account_id)
    except client_error.HTTPError:
        current_status = None
    return current_status
8,477
2,150
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Measures the average time used by git-push command in CQ based on data from
chromium-cq-status.appspot.com."""

# NOTE(review): Python 2 script (print statement, urllib.urlopen); would need
# urllib.request and print() for Python 3.

import argparse
import json
import logging
import sys
import urllib

CQ_STATUS_QUERY_URL = 'http://chromium-cq-status.appspot.com/query'


def load_options():
    """Parse command-line options (--project, --count, --verbose)."""
    parser = argparse.ArgumentParser(description=sys.modules['__main__'].__doc__)
    parser.add_argument('--project', default='chromium', help='Project name.')
    parser.add_argument('--count', '-c', default=1000, type=int, required=True,
                        help='Number of issues to average over.')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Print debugging messages to console')
    return parser.parse_args()


def get_stats(filters, cursor=None):
    """Fetch one page of CQ-status results.

    Returns (results, cursor, more) where `cursor`/`more` drive pagination.
    """
    url = '%s/%s' % (CQ_STATUS_QUERY_URL, '/'.join(filters))
    if cursor:
        url += '?cursor=%s' % cursor
    logging.debug('Loading %s', url)
    data = json.load(urllib.urlopen(url))
    return data['results'], data['cursor'], data['more']


def main():
    options = load_options()
    filters = []
    if options.project:
        filters += ['project=%s' % options.project]
    logging.basicConfig(level=logging.DEBUG if options.verbose else logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    # We search for committed timestamps first, because this guarantees that all
    # these issues will also have comitting timestamp. The opposite is not always
    # true - some issues with committing timestamp may not be comitted yet.
    logging.info('Searching for committed issues')
    issues = []
    cursor = None
    more = True
    while len(issues) < options.count and more:
        results, cursor, more = get_stats(filters + ['action=patch_committed'],
                                          cursor)
        for result in results:
            issues.append({'issue': result['fields']['issue'],
                           'patchset': result['fields']['patchset'],
                           'committed': result['fields']['timestamp']})
    if len(issues) > options.count:
        issues = issues[:options.count]
    logging.debug('Searching committing timestamp for found issues')
    for issue in issues:
        results, _, _ = get_stats(filters + ['action=patch_committing',
                                             'issue=%s' % issue['issue'],
                                             'patchset=%s' % issue['patchset']])
        assert len(results) >= 1, 'Incorrect number of results: %s' % results
        issue['committing'] = results[0]['fields']['timestamp']
    logging.debug(issues)
    # committed - committing == wall time spent in the final git push
    push_times = [i['committed'] - i['committing'] for i in issues]
    average_push_time = sum(push_times) / len(push_times)
    print 'Average git push time is %.2f seconds' % average_push_time


if __name__ == '__main__':
    sys.exit(main())
2,928
870
import random


def create_random_id():
    """Return a pseudo-random numeric ID (6 to 15 digits) as a string.

    NOTE(review): `random` is predictable; if the ID must be unguessable,
    switch to the `secrets` module.
    """
    lower, upper = 100000, 999999999999999
    return "%d" % random.randint(lower, upper)
93
45
from bitmovin_api_sdk.encoding.infrastructure.kubernetes.configuration.configuration_api import ConfigurationApi
113
28
# Copyright (c) 2020, NVIDIA CORPORATION.

from __future__ import annotations

from typing import TYPE_CHECKING, Optional, Union, overload

from typing_extensions import Literal

import cudf

if TYPE_CHECKING:
    from cudf.core.column import ColumnBase


class ColumnMethodsMixin:
    """Mixin for accessor classes that wrap a single column of a Series/Index.

    Holds the underlying column and (optionally) the owning Series/Index so
    results can be re-wrapped in the owner's type or written back in place.
    """

    # Underlying column the accessor operates on.
    _column: ColumnBase
    # Owning Series/Index, or None when the column is free-standing.
    _parent: Optional[Union["cudf.Series", "cudf.Index"]]

    def __init__(
        self,
        column: ColumnBase,
        parent: Union["cudf.Series", "cudf.Index"] = None,
    ):
        self._column = column
        self._parent = parent

    # Typing overloads: inplace=True returns None, otherwise a Series/Index.
    @overload
    def _return_or_inplace(
        self, new_col, inplace: Literal[False], expand=False, retain_index=True
    ) -> Union["cudf.Series", "cudf.Index"]:
        ...

    @overload
    def _return_or_inplace(
        self, new_col, expand: bool = False, retain_index: bool = True
    ) -> Union["cudf.Series", "cudf.Index"]:
        ...

    @overload
    def _return_or_inplace(
        self, new_col, inplace: Literal[True], expand=False, retain_index=True
    ) -> None:
        ...

    @overload
    def _return_or_inplace(
        self,
        new_col,
        inplace: bool = False,
        expand: bool = False,
        retain_index: bool = True,
    ) -> Optional[Union["cudf.Series", "cudf.Index"]]:
        ...

    def _return_or_inplace(
        self, new_col, inplace=False, expand=False, retain_index=True
    ):
        """
        Returns an object of the type of the column owner or updates the column
        of the owner (Series or Index) to mimic an inplace operation

        new_col: the result column (or, when `expand` applies, a Table).
        inplace: mutate the parent/column in place and return None.
        expand: treat new_col as a Table and expand to the parent's
            DataFrame-like constructor.
        retain_index: keep the parent Series' index on the result.
        """
        if inplace:
            if self._parent is not None:
                # Replace the parent's data wholesale, keyed by its name.
                self._parent._mimic_inplace(
                    self._parent.__class__._from_table(
                        cudf._lib.table.Table({self._parent.name: new_col})
                    ),
                    inplace=True,
                )
                return None
            else:
                # No parent: mutate the bare column.
                self._column._mimic_inplace(new_col, inplace=True)
                return None
        else:
            if self._parent is None:
                return new_col
            if expand or isinstance(
                self._parent, (cudf.DataFrame, cudf.MultiIndex)
            ):
                # This branch indicates the passed as new_col
                # is a Table
                table = new_col

                if isinstance(self._parent, cudf.BaseIndex):
                    idx = self._parent._constructor_expanddim._from_table(
                        table=table
                    )
                    # MultiIndex-from-table carries no usable names.
                    idx.names = None
                    return idx
                else:
                    return self._parent._constructor_expanddim(
                        data=table._data, index=self._parent.index
                    )
            elif isinstance(self._parent, cudf.Series):
                if retain_index:
                    return cudf.Series(
                        new_col,
                        name=self._parent.name,
                        index=self._parent.index,
                    )
                else:
                    return cudf.Series(new_col, name=self._parent.name)
            elif isinstance(self._parent, cudf.BaseIndex):
                return cudf.core.index.as_index(
                    new_col, name=self._parent.name
                )
            else:
                return self._parent._mimic_inplace(new_col, inplace=False)
3,427
972
from selenium.common.exceptions import NoSuchElementException, TimeoutException


class DomHelper(object):
    """Convenience wrapper around a selenium WebDriver and a WebDriverWait.

    `selector` arguments may be CSS selector strings or already-resolved
    WebElements — get_el/get_els pass non-strings straight through. The
    verify_* methods raise a plain Exception on failure; the wait_for_*
    methods poll via `waiter` and raise on timeout.
    """

    driver = None
    waiter = None

    def open_page(self, url):
        self.driver.get(url)

    def reload_page(self):
        self.driver.refresh()

    def print_el(self, element):
        # Debug helper: dump tag, id, class and text of an element.
        print('tag: ' + element.tag_name + ' id: ' + element.get_attribute('id') + ' class: ' + element.get_attribute('class') + ' text: ' + element.text)

    def get_el(self, selector):
        # Accepts a CSS selector string or an already-resolved element.
        if isinstance(selector, str):
            return self.driver.find_element_by_css_selector(selector)
        else:
            return selector

    def get_els(self, selector):
        if isinstance(selector, str):
            return self.driver.find_elements_by_css_selector(selector)
        else:
            return selector

    def get_child_el(self, parent, selector):
        """Return the first matching descendant of `parent`, or None."""
        try:
            return parent.find_element_by_css_selector(selector)
        except NoSuchElementException:
            return None

    def get_child_els(self, parent, selector):
        return parent.find_elements_by_css_selector(selector)

    def is_el_present(self, selector):
        try:
            self.driver.find_element_by_css_selector(selector)
            return True
        except NoSuchElementException:
            return False

    def verify_el_present(self, selector):
        if not self.is_el_present(selector):
            raise Exception('Element %s not found' % selector)

    def is_el_visible(self, selector):
        return self.get_el(selector).is_displayed()

    def click_button(self, selector):
        # iPhone driver needs a jQuery 'tap' instead of a native click.
        if self.driver.name == 'iPhone':
            self.driver.execute_script('$("%s").trigger("tap")' % (selector))
        else:
            self.get_el(selector).click()

    def enter_text_field(self, selector, text):
        text_field = self.get_el(selector)
        text_field.clear()
        text_field.send_keys(text)

    def select_checkbox(self, selector, name, deselect=False):
        """(De)select the checkbox with the given name among `selector` matches."""
        found_checkbox = False
        checkboxes = self.get_els(selector)
        for checkbox in checkboxes:
            if checkbox.get_attribute('name') == name:
                found_checkbox = True
                # Click only when the current state differs from the target.
                if not deselect and not checkbox.is_selected():
                    checkbox.click()
                if deselect and checkbox.is_selected():
                    checkbox.click()
        if not found_checkbox:
            raise Exception('Checkbox %s not found.' % (name))

    def select_option(self, selector, value):
        found_option = False
        options = self.get_els(selector)
        for option in options:
            if option.get_attribute('value') == str(value):
                found_option = True
                option.click()
        if not found_option:
            raise Exception('Option %s not found' % (value))

    def get_selected_option(self, selector):
        options = self.get_els(selector)
        for option in options:
            if option.is_selected():
                return option.get_attribute('value')

    def is_option_selected(self, selector, value):
        # True iff exactly the option with `value` is selected.
        options = self.get_els(selector)
        for option in options:
            if option.is_selected() != (value == option.get_attribute('value')):
                print(option.get_attribute('value'))
                return False
        return True

    def is_text_equal(self, selector, text):
        return self.get_el(selector).text == text

    def verify_inputs_checked(self, selector, checked):
        """`checked` is the collection of input names expected to be selected."""
        checkboxes = self.get_els(selector)
        for checkbox in checkboxes:
            name = checkbox.get_attribute('name')
            if checkbox.is_selected() != (name in checked):
                raise Exception('Input isnt checked as expected - %s' % (name))

    def verify_option_selected(self, selector, value):
        if not self.is_option_selected(selector, value):
            raise Exception('Option isnt selected as expected')

    def verify_radio_value(self, selector, value):
        value = str(value)
        radios = self.get_els(selector)
        for radio in radios:
            radio_value = radio.get_attribute('value')
            if radio.is_selected() and radio_value != value:
                raise Exception('Radio with value %s is checked and shouldnt be' % radio_value)
            elif not radio.is_selected() and radio_value == value:
                raise Exception('Radio with value %s isnt checked and should be' % radio_value)

    def verify_text_field(self, selector, text):
        text_field = self.get_el(selector)
        value = text_field.get_attribute('value')
        if value != text:
            raise Exception('Text field contains %s, not %s' % (value, text))

    def verify_text_value(self, selector, value):
        text_field = self.get_el(selector)
        if text_field.get_attribute('value') != value:
            raise Exception('Value of %s not equal to "%s" - instead saw "%s"' % (selector, value, text_field.get_attribute('value')))

    def verify_text_of_el(self, selector, text):
        if not self.is_text_equal(selector, text):
            raise Exception('Text of %s not equal to "%s" - instead saw "%s"' % (selector, text, self.get_el(selector).text))

    def verify_text_in_els(self, selector, text):
        els = self.get_els(selector)
        found_text = False
        for el in els:
            if text in el.text:
                found_text = True
        if not found_text:
            raise Exception('Didnt find text: %s' % (text))

    def verify_text_not_in_els(self, selector, text):
        els = self.get_els(selector)
        found_text = False
        for el in els:
            if text in el.text:
                found_text = True
        if found_text:
            raise Exception('Found text: %s' % (text))

    def is_button_enabled(self, selector):
        return (self.get_el(selector).get_attribute('disabled') == 'false')

    def check_title(self, title):
        # Site-specific: titles may carry the 'eatdifferent.com: ' prefix.
        return self.driver.title == title or self.driver.title == 'eatdifferent.com: ' + title

    def wait_for(self, condition):
        self.waiter.until(lambda driver: condition())

    def check_num(self, selector, num):
        els = self.get_els(selector)
        return len(els) == num

    def wait_for_num_els(self, selector, num):
        try:
            self.waiter.until(lambda driver: self.check_num(selector, num))
        except TimeoutException:
            raise Exception('Never saw %s number of els for %s' % (num, selector))

    def wait_for_visible(self, selector):
        try:
            self.waiter.until(lambda driver: self.is_el_visible(selector))
        except TimeoutException:
            raise Exception('Never saw element %s become visible' % (selector))

    def wait_for_hidden(self, selector):
        try:
            self.waiter.until(lambda driver: not self.is_el_visible(selector))
        except TimeoutException:
            raise Exception('Never saw element %s become hidden' % (selector))

    def wait_for_button(self, selector):
        try:
            self.waiter.until(lambda driver: self.is_button_enabled(selector))
        except TimeoutException:
            raise Exception('Never saw button %s enabled' % (selector))

    def wait_for_text(self, selector, text):
        try:
            self.waiter.until(lambda driver: self.is_text_equal(selector, text))
        except TimeoutException:
            raise Exception('Never saw text %s for %s' % (text, selector))

    def wait_for_el(self, selector):
        try:
            self.waiter.until(lambda driver: self.is_el_present(selector))
        except TimeoutException:
            raise Exception('Never saw element %s' % (selector))

    def wait_for_title(self, title):
        try:
            self.waiter.until(lambda driver: self.check_title(title))
        except TimeoutException:
            raise Exception('Never saw title change to %s' % (title))

    # NOTE(review): __init__ is defined after the methods in the original;
    # kept here to preserve the file's layout.
    def __init__(self, driver, waiter):
        self.driver = driver
        self.waiter = waiter
8,139
2,254
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models from django.contrib.auth.models import User from django.contrib.postgres.fields import ArrayField # Create your models here. """ class NewSearch(models.Model): SINGLEVIEW = 'similarityindex' MULTIVIEW = 'updated_similarity_index' DATABASE_CHOICES = ( (MULTIVIEW, 'Multiview Model'), (SINGLEVIEW, 'Single View Model'), ) H1 = "\'H1\'" H1L1 = "\'H1\', \'L1\'" H1L1V1 = "\'H1\', \'L1\', \'V1\'" L1 = "\'L1\'" L1V1 = "\'L1\', \'V1\'" V1 = "\'V1\'" IFO_CHOICES = ( (H1L1, 'H1 L1'), (H1, 'H1'), (H1L1V1, 'H1 L1 V1'), (L1, 'L1'), (L1V1, 'L1 V1'), (V1, 'V1'), ) database = models.ChoiceField(choices=DATABASE_CHOICES,) howmany = models.IntegerField(label='How many similar images would you like to return', max_value=500, min_value=1) zooid = models.CharField(label = 'This is the Zooniverse assigned random ID of the image (an integer value)', max_length=10, required=False) imageid = models.CharField(label='The GravitySpy uniqueid (this is the 10 character hash that uniquely identifies all gravity spy images)', max_length=10, required=False) ifo = models.ChoiceField(choices=IFO_CHOICES,) user = models.ForeignKey(User) new_subjects = ArrayField(models.CharField(max_length=10), blank=True) created_at = models.DateTimeField(auto_now_add=True) """
1,491
574
from machine import Pin


class simple_encoder():
    """Quadrature rotary encoder driven from pin IRQs (MicroPython).

    ra/rb: machine.Pin inputs for the two encoder channels.
    pin_irq: trigger mask passed to Pin.irq (e.g. Pin.IRQ_FALLING) —
        presumably a falling-edge trigger; TODO confirm against the caller.
    """

    def __init__(self, ra, rb, pin_irq):
        self.ra = ra
        self.rb = rb
        # Net detent count: incremented/decremented by turn().
        self.counter = 0
        # Both channels share one handler; the `pin` argument identifies
        # which channel fired.
        self.ra.irq(trigger=pin_irq, handler=self.turn)
        self.rb.irq(trigger=pin_irq, handler=self.turn)

    def turn(self, pin):
        """IRQ handler: busy-waits while either channel is low and steps
        `counter` once, +1 or -1 depending on rotation direction.

        NOTE(review): `enc_turn` is assigned but never used, and the return
        value of an IRQ handler is discarded — both look like leftovers.
        """
        changed = False
        enc_turn = 0
        while (not self.ra.value()) or (not self.rb.value()):
            if not changed:
                if self.ra.value() == pin.value():
                    enc_turn = 2
                    self.counter = self.counter + 1
                if self.rb.value() == pin.value():
                    enc_turn = 1
                    self.counter = self.counter - 1
                changed = True
        return True
776
251
# Example of Naive Bayes implemented from Scratch in Python
import csv
import random
import math
# NOTE: the original imported xgboost, matplotlib and numpy but never used
# them anywhere in this script; the unused imports have been removed.


def loadCsv(filename):
    """Load a CSV file as a list of rows with every field converted to float."""
    with open(filename, "r") as f:  # bug fix: original leaked the file handle
        dataset = [[float(x) for x in row] for row in csv.reader(f)]
    return dataset


def _load_dataset(filename, num_columns, out=None):
    """Read `filename` as CSV, convert the first `num_columns` fields of each
    row to float, and append the rows to `out` (a new list when None).

    Consolidates six near-identical loadDataset_* functions that differed
    only in column count. Returns the populated list.
    """
    if out is None:
        out = []
    with open(filename, "r") as f:  # bug fix: original leaked the file handle
        for row in csv.reader(f):
            for col in range(num_columns):
                row[col] = float(row[col])
            out.append(row)
    return out


# Backward-compatible wrappers. Bug fix: the originals used a mutable default
# argument (trainingSet=[]), which silently accumulated rows across calls.
def loadDataset_ckd(filename, trainingSet=None):
    """Load the 15-column CKD training data into trainingSet (mutated)."""
    return _load_dataset(filename, 15, trainingSet)


def loadDataset_ckd1(filename, testSet=None):
    """Load the 15-column CKD test data into testSet (mutated)."""
    return _load_dataset(filename, 15, testSet)


def loadDataset_ml(filename, trainingSet=None):
    """Load the 9-column diabetes training data into trainingSet (mutated)."""
    return _load_dataset(filename, 9, trainingSet)


def loadDataset_ml1(filename, testSet=None):
    """Load the 9-column diabetes test data into testSet (mutated)."""
    return _load_dataset(filename, 9, testSet)


def loadDataset_hd(filename, trainingSet=None):
    """Load the 12-column heart-disease training data into trainingSet (mutated)."""
    return _load_dataset(filename, 12, trainingSet)


def loadDataset_hd1(filename, testSet=None):
    """Load the 12-column heart-disease test data into testSet (mutated)."""
    return _load_dataset(filename, 12, testSet)


def splitDataset(dataset, splitRatio):
    """Randomly split `dataset` into [train, test] at the given ratio."""
    trainSize = int(len(dataset) * splitRatio)
    trainSet = []
    copy = list(dataset)
    while len(trainSet) < trainSize:
        index = random.randrange(len(copy))
        trainSet.append(copy.pop(index))
    return [trainSet, copy]


def separateByClass(dataset):
    """Group rows by their class label (the last element of each row)."""
    separated = {}
    for vector in dataset:
        separated.setdefault(vector[-1], []).append(vector)
    return separated


def mean(numbers):
    """Arithmetic mean of a non-empty sequence."""
    return sum(numbers) / float(len(numbers))


def stdev(numbers):
    """Sample standard deviation (n-1 denominator) of a sequence, len >= 2."""
    avg = mean(numbers)
    variance = sum([pow(x - avg, 2) for x in numbers]) / float(len(numbers) - 1)
    return math.sqrt(variance)


def summarize(dataset):
    """Per-attribute (mean, stdev) pairs, excluding the trailing class label."""
    summaries = [(mean(attribute), stdev(attribute))
                 for attribute in zip(*dataset)]
    del summaries[-1]  # drop the class-label column
    return summaries


def summarizeByClass(dataset):
    """Map each class label to its per-attribute (mean, stdev) summaries."""
    separated = separateByClass(dataset)
    return {classValue: summarize(instances)
            for classValue, instances in separated.items()}


def calculateProbability(x, mean, stdev):
    """Gaussian density of x given mean/stdev.

    Keeps the original's guards: an all-zero triple is remapped to (1, 1, 1),
    and zero denominators are clamped to 0.1 to avoid division by zero.
    """
    if x == 0 and mean == 0 and stdev == 0:
        x = mean = stdev = 1
    part2 = 2 * math.pow(stdev, 2)
    if part2 == 0:
        part2 = 0.1
    exponent = math.exp(-(math.pow(x - mean, 2) / part2))
    part3 = math.sqrt(2 * math.pi) * stdev
    if part3 == 0:
        part3 = 0.1
    return (1 / part3) * exponent


def calculateClassProbabilities(summaries, inputVector):
    """Per-class likelihood of inputVector under the Gaussian NB model."""
    probabilities = {}
    for classValue, classSummaries in summaries.items():
        probabilities[classValue] = 1
        for i in range(len(classSummaries)):
            attr_mean, attr_stdev = classSummaries[i]
            probabilities[classValue] *= calculateProbability(
                inputVector[i], attr_mean, attr_stdev)
    return probabilities


def predict(summaries, inputVector):
    """Return the class with the highest likelihood for inputVector."""
    probabilities = calculateClassProbabilities(summaries, inputVector)
    bestLabel, bestProb = None, -1
    for classValue, probability in probabilities.items():
        if bestLabel is None or probability > bestProb:
            bestProb = probability
            bestLabel = classValue
    return bestLabel


def getPredictions(summaries, testSet):
    """Predict a class label for every row of testSet."""
    return [predict(summaries, row) for row in testSet]


def getAccuracy(testSet, predictions):
    """Percentage of rows whose trailing label matches the prediction."""
    correct = 0
    for i in range(len(testSet)):
        if testSet[i][-1] == predictions[i]:
            correct += 1
    return (correct / float(len(testSet))) * 100.0


def _run_model(label, input_label, num_columns, train_file, test_file):
    """Train on train_file, predict test_file, print the original diagnostics.

    Returns (n_train_rows, n_classes) for the caller's running totals.
    """
    print('\n~~~~~~~~~~~')
    training_set = _load_dataset(train_file, num_columns)
    test_set = _load_dataset(test_file, num_columns)
    print('Train set of ' + label + ': ', repr(len(training_set)))
    print('Input for ' + input_label + ' related parameters :\n ', test_set)
    summaries = summarizeByClass(training_set)
    print('matches: ', repr(len(summaries)))
    predictions = getPredictions(summaries, test_set)
    print('> disease presence =', predictions)
    getAccuracy(test_set, predictions)  # computed but unreported, as before
    return len(training_set), len(summaries)


def main():
    """Run the CKD, diabetes and heart-disease classifiers in sequence."""
    total_datas = 0
    matched_count = 0
    datasets = (
        ('ckd', 'CKD disease', 15,
         'dataset_ckd_train.csv', 'dataset_ckd_test.csv'),
        ('diabetes', 'Diabetes disease', 9,
         'dataset_diabetes_train.csv', 'dataset_diabetes_test.csv'),
        ('heart disease', 'heart disease', 12,
         'dataset_heartdisease_train.csv', 'dataset_heartdisease_test.csv'),
    )
    for label, input_label, ncols, train_f, test_f in datasets:
        n_train, n_classes = _run_model(label, input_label, ncols,
                                        train_f, test_f)
        total_datas += n_train
        matched_count += n_classes
    print('Total Datas', total_datas, 'Matched Accuracy: ', matched_count)


# Bug fix: main() previously ran unconditionally at import time.
if __name__ == '__main__':
    main()
7,928
2,970
from collections import namedtuple
from copy import deepcopy

import numpy as np

# Record type for 3D coordinates used throughout the wing geometry.
Point = namedtuple('Point', 'x y z')


def serializesection(self):
    """Convert a wing section into nested plain dicts.

    Monkey-patched onto ``_Wing._Section`` as its ``serialize`` method.
    """
    payload = dict(self._asdict())
    payload['pos'] = dict(self.pos._asdict())
    return payload


class _Wing:
    """
    A data structue for multi trapez wing definitions.
    """

    _Section = namedtuple('Section', ['pos', 'chord', 'twist', 'airfoil'])
    _Section.serialize = serializesection

    def __init__(self, pos=(0.0, 0.0, 0.0)):
        self.x, self.y, self.z = pos
        self.sections = []

    def append(self, pos=(0.0, 0.0, 0.0), chord=1.0, twist=0.0, airfoil=''):
        """Append one spanwise section defined by leading-edge position,
        chord length, twist angle and airfoil name."""
        section = self._Section(Point(*pos), chord, twist, airfoil)
        self.sections.append(section)

    def get_mac(self):
        """Calculate mean aerodynamic chord.

        Returns
        -------
        pos: arraylike
            leading edge position of mean aerodynamic chord
        mac: float
            mac length

        Notes
        -----
        Implements formulas reported in http://dx.doi.org/10.1063/1.4951901
        """
        le_pos = np.zeros(3)
        total_area = 0.0
        mac = 0.0
        # walk over consecutive section pairs (one trapezoidal segment each)
        for inner, outer in zip(self.sections, self.sections[1:]):
            x1, x2 = inner.pos.x, outer.pos.x
            y1, y2 = inner.pos.y, outer.pos.y
            c1, c2 = inner.chord, outer.chord
            # segment area, taper ratio and segment mac
            seg_area = (c1 + c2) / 2 * (y2 - y1)
            taper = c2 / c1
            seg_mac = 2 / 3 * c1 * (taper ** 2 + taper + 1) / (taper + 1)
            frac = (1 + 2 * taper) / (3 + 3 * taper)
            seg_x = x1 + (x2 - x1) * frac
            seg_y = y1 + (y2 - y1) * frac
            # accumulate area-weighted contributions
            le_pos += np.array([seg_x, seg_y, 0]) * seg_area
            mac += seg_mac * seg_area
            total_area += seg_area
        le_pos /= total_area
        mac /= total_area
        return le_pos, mac

    @property
    def span(self):
        """Get span of wing."""
        return 2 * max(sec.pos.y for sec in self.sections)

    @property
    def area(self):
        """Get wing area."""
        span_positions = [sec.pos.y for sec in self.sections]
        chord_lengths = [sec.chord for sec in self.sections]
        # trapezoidal integration over the half wing, doubled for symmetry
        return 2 * np.trapz(chord_lengths, span_positions)

    @property
    def aspectratio(self):
        """Get aspect ratio."""
        return self.span ** 2 / self.area

    @property
    def mac(self):
        """Get mac length"""
        return self.get_mac()[1]


class Wing(_Wing):
    """A object representing lift generating airplane parts.

    Parameters
    ----------
    pos: float
        coordinate system offset
    rot: float
    """

    _ControlSurface = namedtuple('ControlSurface',
                                 ['pos1', 'pos2', 'depth1', 'depth2', 'cstype'])

    def __init__(self, pos=(0.0, 0.0, 0.0)):
        super().__init__(pos)
        self.controlsurfaces = {}

    def add_controlsurface(self, name, pos1, pos2, depth1, depth2, cstype):
        """Add controlsurface to Wing instance

        Parameters
        ----------
        name : str
            identifier for control surface
        pos1 : float
            starting position (spanwise)
        pos2 : float
            end position (spanwise)
        depth1 : float
            start depth or chordwise position (depends on type)
        depth2 : float
            end depth or chordwise position (depends on type)
        cstype : str
            use one of the following type strings: flap, spoiler, airbrake
        """
        surface = self._ControlSurface(pos1, pos2, depth1, depth2, cstype)
        self.controlsurfaces[name] = surface

    @property
    def chords(self):
        return np.array([sec.chord for sec in self.sections])

    @property
    def xs(self):
        return np.array([sec.pos.x for sec in self.sections])

    @property
    def ys(self):
        return np.array([sec.pos.y for sec in self.sections])

    @property
    def twists(self):
        return np.array([sec.twist for sec in self.sections])

    @property
    def airfoils(self):
        return np.array([sec.airfoil for sec in self.sections])

    def within_control(self, csname, y):
        """True where |y| lies inside the named control surface's span."""
        y = np.abs(y)
        if csname not in self.controlsurfaces:
            raise KeyError('{} is not a control surface'.format(csname))
        cs = self.controlsurfaces[csname]
        return (cs.pos1 <= y) & (y <= cs.pos2)

    def within_airbrake(self, ys):
        """Boolean mask of positions covered by any airbrake/spoiler."""
        ys = np.abs(ys)
        covered = np.full_like(ys, False, dtype=bool)
        for cs in self.controlsurfaces.values():
            if cs.cstype not in ('airbrake', 'spoiler'):
                continue
            covered = covered | ((cs.pos1 <= ys) & (ys <= cs.pos2))
        return covered

    def serialize(self):
        """Return a nested plain-dict representation of this wing."""
        return {
            'pos': {'x': self.x, 'y': self.y, 'z': self.z},
            'sections': [deepcopy(sec.serialize()) for sec in self.sections],
            'controlsurfaces': {name: dict(cs._asdict())
                                for name, cs in self.controlsurfaces.items()}
        }

    @classmethod
    def load_from_file(cls, filename):
        """Create a Wing from a YAML file with a top-level 'wing' key."""
        import yaml
        with open(filename, 'r') as datfile:
            wingdata = yaml.safe_load(datfile)
        return cls.deserialize(wingdata['wing'])

    @classmethod
    def deserialize(cls, adict):
        """Create new Wing instance from dict

        Parameters
        ----------
        adict : dict
           dictionary containing wing data

        Returns
        -------
        Wing instance object
        """
        wing = cls(pos=Point(**adict['pos']))
        # rebuild sections (deepcopy so the input dict stays untouched)
        for secdict in adict['sections']:
            secdict_ = deepcopy(secdict)
            secdict_['pos'] = Point(**secdict_['pos'])
            wing.append(**secdict_)
        # control surfaces are optional in the serialized form
        try:
            for name, csdict in adict['controlsurfaces'].items():
                wing.add_controlsurface(name, **csdict)
        except KeyError:
            pass
        return wing

    def plot(self):
        """Draw a planform view of the half wing with matplotlib."""
        import matplotlib.pyplot as plt

        # draw centerline
        #plt.axvline(x=0, linestyle='-.')

        le_xs = []
        sec_ys = []
        sec_chords = []
        # draw each section's chord line
        for section in self.sections:
            x = section.pos.x + self.x
            y = section.pos.y
            chord = section.chord
            plt.plot((y, y), (x, x + chord), 'r')
            le_xs.append(x)
            sec_ys.append(y)
            sec_chords.append(chord)
        sec_ys = np.array(sec_ys)

        # draw leading edge
        plt.plot(sec_ys, np.array(le_xs), 'b')
        # draw trailing edge
        plt.plot(sec_ys, np.array(le_xs) + np.array(sec_chords), 'b')

        # format
        plt.axis('equal')
        plt.axis('off')
        plt.gca().invert_yaxis()
        plt.xlim(-max(sec_ys) / 100, max(sec_ys) + 1)
7,367
2,342
# -*- coding: utf-8 -*-
"""
Cross-docking truck data.

This data is generated by a generate_dataset.py script.

Created: Feb 18, 2019 at 07:30:02 PM

Copyright (c) 2022, Krerkkiat Chusap
This souce code is licensed under BSD 3-Clause "New" or "Revised" License
(see LICENSE for details).
"""
from pathlib import Path

# Problem data.
# The instance is named after this module's file name (without extension).
name = Path(__file__).stem
inbound_gate_count = 5
outbound_gate_count = 5

# Parameters used to generate this data.
number_of_total_product_types = 15
product_per_truck_rate = 0.35
# NOTE(review): each inbound truck's row appears to sum to one of these
# totals (e.g. row 0 sums to 250, row 1 to 340) — generator convention,
# confirm against generate_dataset.py.
possible_inbound_total_product = [250, 340]

# Truck data.
# One row per truck; one column per product type (15 columns, matching
# number_of_total_product_types). Values are product quantities carried.
_inbound_truck_raw_data = [
    [0, 0, 0, 0, 0, 0, 184, 12, 0, 0, 3, 0, 51, 0, 0],
    [76, 0, 0, 0, 0, 0, 0, 0, 41, 73, 42, 0, 20, 88, 0],
    [0, 113, 74, 0, 83, 18, 0, 0, 0, 0, 30, 22, 0, 0, 0],
    [0, 41, 15, 7, 0, 0, 44, 113, 0, 0, 0, 120, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 33, 211, 0, 6, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 58, 0, 0, 9, 38, 23, 0, 0, 93, 119, 0],
    [69, 0, 0, 26, 0, 20, 0, 0, 205, 0, 0, 0, 6, 0, 14],
    [0, 62, 82, 71, 0, 92, 2, 0, 0, 0, 0, 0, 31, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 54, 50, 0, 0, 22, 124, 0],
    [0, 0, 0, 141, 0, 0, 0, 0, 27, 0, 3, 0, 113, 10, 46],
    [0, 0, 0, 143, 0, 0, 53, 21, 0, 0, 0, 33, 0, 0, 0],
    [0, 76, 32, 47, 0, 7, 0, 0, 0, 87, 0, 0, 0, 0, 91],
    [0, 0, 42, 0, 0, 115, 47, 0, 0, 0, 0, 0, 0, 46, 0],
    [9, 47, 0, 0, 0, 93, 4, 0, 46, 141, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 60, 0, 46, 0, 47, 39, 98, 50, 0, 0, 0],
]
_outbound_truck_raw_data = [
    [0, 16, 11, 41, 5, 28, 11, 32, 12, 13, 2, 2, 16, 17, 5],
    [9, 56, 11, 16, 9, 2, 2, 4, 99, 20, 9, 33, 1, 2, 15],
    [1, 7, 8, 0, 1, 22, 19, 7, 5, 8, 0, 14, 50, 10, 13],
    [2, 5, 4, 6, 1, 12, 4, 47, 2, 11, 2, 6, 4, 29, 0],
    [7, 4, 6, 8, 58, 12, 5, 4, 1, 0, 3, 6, 14, 22, 5],
    [4, 4, 16, 5, 0, 4, 1, 11, 2, 6, 2, 9, 3, 2, 1],
    [15, 10, 24, 23, 7, 5, 3, 27, 33, 16, 10, 9, 44, 6, 10],
    [5, 0, 5, 14, 1, 0, 35, 30, 36, 41, 16, 4, 4, 20, 1],
    [4, 21, 12, 25, 2, 7, 13, 14, 8, 30, 2, 4, 6, 27, 6],
    [22, 42, 1, 47, 8, 33, 0, 7, 13, 2, 20, 17, 50, 9, 4],
    [10, 15, 3, 55, 17, 1, 23, 2, 11, 56, 9, 7, 0, 12, 8],
    [14, 18, 1, 11, 11, 5, 19, 27, 34, 6, 1, 3, 4, 31, 0],
    [1, 1, 45, 29, 11, 14, 62, 2, 5, 10, 1, 7, 13, 7, 23],
    [4, 7, 26, 46, 15, 3, 1, 1, 13, 21, 1, 0, 4, 20, 1],
    [4, 17, 7, 12, 11, 19, 7, 2, 4, 69, 3, 18, 3, 16, 16],
    [4, 5, 8, 10, 8, 1, 28, 59, 2, 23, 7, 10, 11, 2, 1],
    [18, 6, 12, 7, 1, 27, 2, 5, 4, 28, 7, 27, 2, 26, 7],
    [6, 2, 10, 3, 13, 61, 2, 1, 52, 5, 1, 1, 45, 39, 5],
    [1, 4, 4, 12, 5, 24, 16, 7, 16, 22, 15, 1, 4, 6, 2],
    [1, 35, 2, 3, 1, 28, 55, 7, 16, 9, 4, 6, 9, 2, 8],
    [0, 13, 2, 13, 3, 5, 1, 11, 2, 12, 18, 1, 25, 17, 1],
    [4, 5, 5, 15, 8, 4, 31, 1, 12, 7, 18, 30, 3, 34, 2],
    [4, 31, 4, 3, 2, 16, 10, 22, 51, 1, 18, 5, 2, 9, 12],
    [2, 13, 13, 22, 0, 9, 30, 36, 9, 2, 6, 5, 19, 22, 3],
    [12, 2, 5, 9, 3, 3, 33, 0, 16, 1, 1, 0, 0, 0, 2],
]

# Derived data.
# Truck counts follow directly from the row counts above.
inbound_truck_count = len(_inbound_truck_raw_data)
outbound_truck_count = len(_outbound_truck_raw_data)
total_truck_count = inbound_truck_count + outbound_truck_count
3,103
2,461
import base # 开始真正的模型训练 import torch import torch.nn as nn # 实例化网络 net = base.MyCustomNet() # 定义数据集的训练迭代轮次 max_epoch = 5 # 定义学习率 learning_rate = 0.001 # 定义loss函数、定义优化器 import torch.optim as optim criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9) # 进行模型训练 for epoch in range(max_epoch): # loop over the dataset multiple times running_loss = 0.0 for i, data in enumerate(base.trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() if i % 2000 == 1999: # print every 2000 mini-batches print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}') running_loss = 0.0 print('Finished Training') PATH = './data/cifar_net.pth' torch.save(net.state_dict(), PATH)
1,121
462
import os
import random
from typing import List, Tuple, Callable

import torch
from ignite.contrib.handlers import ProgressBar, LRScheduler
from ignite.handlers import ModelCheckpoint
from sklearn.base import BaseEstimator, TransformerMixin
from torch import nn
from torch import optim
from torch.nn.modules.loss import _Loss, L1Loss
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, MultiStepLR
from torch.utils.data import DataLoader
from ignite.metrics import Metric, RunningAverage
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator

from coord2vec.common.itertools import flatten
from coord2vec.common.mtl.metrics import EmbeddingData, DistanceCorrelation, RootMeanSquaredError
from coord2vec import config
from coord2vec.config import HALF_TILE_LENGTH, TENSORBOARD_DIR
from coord2vec.feature_extraction.features_builders import FeaturesBuilder
from coord2vec.image_extraction.tile_image import generate_static_maps, render_multi_channel
from coord2vec.image_extraction.tile_utils import build_tile_extent
from coord2vec.models.architectures import dual_fc_head, multihead_model, simple_cnn, simple_head
from coord2vec.models.baselines.tensorboard_utils import TrainExample, \
    create_summary_writer, add_metrics_to_tensorboard, add_embedding_visualization, build_example_image_figure
from coord2vec.models.data_loading.tile_features_loader import TileFeaturesDataset
from coord2vec.models.losses import MultiheadLoss
from coord2vec.models.resnet import wide_resnet50_2, resnet18, resnet50, resnet34


class Coord2Vec(BaseEstimator, TransformerMixin):
    """
    Wrapper for the coord2vec algorithm.  Project's "main".

    A multi-head CNN: a shared backbone produces an embedding, and one
    regression head per feature predicts that feature from the embedding.
    Follows the sklearn estimator API (fit / transform).
    """

    def __init__(self, feature_builder: FeaturesBuilder, n_channels: int, losses: List[_Loss] = None,
                 losses_weights: List[float] = None, log_loss: bool = False, exponent_heads: bool = False,
                 cnn_model: Callable = resnet34, model_save_path: str = None, embedding_dim: int = 128,
                 multi_gpu: bool = False, cuda_device: int = 0, lr: float = 1e-4,
                 lr_steps: List[int] = None, lr_gamma: float = 0.1):
        """
        Args:
            feature_builder: FeatureBuilder to create features with
                the features were created with
            n_channels: the number of channels in the input images
            losses: a list of losses to use. must be same length of the number of features
            losses_weights: weights to give the different losses. if None then equals weights of 1
            log_loss: whether to use the log function on the loss before back propagation
            exponent_heads: stored on the instance; not referenced elsewhere in this class
            cnn_model: backbone constructor, e.g. one of the resnet factories
            model_save_path: directory for checkpoints; None disables saving
            embedding_dim: dimension of the embedding to create
            multi_gpu: whether to use more than one GPU or not
            cuda_device: if multi_gpu==False, choose the GPU to work on
            lr: learning rate for the Adam optimizer
            lr_steps: Training steps in which we apply a multiply by lr_gamma to the LR
                NOTE(review): passed straight to MultiStepLR as ``milestones`` —
                the default None will fail there; callers appear expected to
                always supply a list. Confirm.
            lr_gamma: The multiplier we multiply the LR
        """
        self.model_save_path = model_save_path
        self.losses_weights = losses_weights
        self.log_loss = log_loss
        self.exponent_head = exponent_heads
        self.embedding_dim = embedding_dim
        self.cnn_model = cnn_model
        self.n_channels = n_channels
        self.multi_gpu = multi_gpu
        # Pin a single GPU unless DataParallel is requested.
        if not multi_gpu:
            self.device = torch.device(f'cuda:{cuda_device}' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = torch.device(f'cuda' if torch.cuda.is_available() else 'cpu')
        # self.device = 'cpu'
        self.feature_names = feature_builder.features_names
        self.n_features = len(self.feature_names)

        # create L1 losses if not supplied
        self.losses = [L1Loss() for _ in range(self.n_features)] if losses is None else losses
        assert len(self.losses) == self.n_features, "Number of losses must be equal to number of features"

        # create the model: shared backbone + one head per feature
        self.model = self._build_model(cnn_model, self.n_channels, self.n_features)
        if multi_gpu:
            self.model = nn.DataParallel(self.model)
        self.model.to(self.device)

        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
        self.step_scheduler = MultiStepLR(self.optimizer, milestones=lr_steps, gamma=lr_gamma)

    def fit(self, train_dataset: TileFeaturesDataset, val_dataset: TileFeaturesDataset = None, epochs: int = 10,
            batch_size: int = 10, num_workers: int = 10, evaluate_every: int = 300, save_every: int = 1000):
        """
        Train the multi-head model with an ignite trainer/evaluator pair,
        logging losses, metrics and extreme examples to TensorBoard.

        Args:
            train_dataset: The dataset object for training data
            val_dataset: The dataset object for validation data, optional
                NOTE(review): if None, the periodic evaluation below will call
                ``evaluator.run(None)`` — confirm callers always pass one.
            epochs: number of epochs to train the network
            batch_size: batch size for the network
            num_workers: number of workers for the network
            evaluate_every: every how many steps to run evaluation
            save_every: every how many steps to save the model

        Returns:
            a trained pytorch model
        """
        # create data loaders
        train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
        if val_dataset is not None:
            val_data_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
        else:
            val_data_loader = None

        # combined loss over all heads
        criterion = MultiheadLoss(self.losses, use_log=self.log_loss, weights=self.losses_weights).to(self.device)

        # create tensorboard writer
        writer = create_summary_writer(self.model, train_data_loader, log_dir=TENSORBOARD_DIR)

        def multihead_loss_func(y_pred, y):
            # y_pred is (embedding, per-head outputs); the loss uses only the heads.
            return criterion(y_pred[1], torch.split(y, 1, dim=1))[0]

        def multihead_output_transform(x, y, y_pred, *args):
            # Unpack the model output and recompute per-head losses (no grad)
            # so handlers can log them alongside predictions and targets.
            embedding, output = y_pred
            y_pred_tensor = torch.stack(output).squeeze(2).transpose(0, 1)
            y_tensor = y
            data = x
            with torch.no_grad():
                loss, multi_losses = criterion(output, torch.split(y, 1, dim=1))
            return data, embedding, loss, multi_losses, y_pred_tensor, y_tensor

        eval_metrics = {'rmse': RootMeanSquaredError(),
                        # 'corr': DistanceCorrelation(),
                        # 'embedding_data': EmbeddingData()
                        }
        train_metrics = {'rmse': RootMeanSquaredError()
                         # , 'corr': DistanceCorrelation()
                         }

        trainer = create_supervised_trainer(self.model, self.optimizer, multihead_loss_func, device=self.device,
                                            output_transform=multihead_output_transform)
        for name, metric in train_metrics.items():
            # Calculate metrics also on trainer
            metric.attach(trainer, name)
        evaluator = create_supervised_evaluator(self.model, metrics=eval_metrics, device=self.device,
                                                output_transform=multihead_output_transform)

        if self.model_save_path is not None:
            # do we want to use it ? from Ignite
            checkpoint_handler = ModelCheckpoint(self.model_save_path, 'checkpoint', save_interval=save_every,
                                                 n_saved=10, require_empty=False, create_dir=True)
        pbar = ProgressBar()
        # RunningAverage(output_transform=lambda x: x[2])
        pbar.attach(trainer)

        # Step the LR schedule at the start of every iteration.
        scheduler = LRScheduler(self.step_scheduler)
        trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
        # NOTE(review): checkpoint_handler is only bound when model_save_path
        # is set — this line raises NameError when model_save_path is None.
        trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': self.model})

        @trainer.on(Events.EPOCH_STARTED)
        def init_state_params(engine):
            # Per-feature "extreme example" slots, reset each epoch:
            # {plus,minus}{plus,minus} track sum/diff extremes for TensorBoard figures.
            engine.state.plusplus_ex, engine.state.plusminus_ex = [None] * self.n_features, [None] * self.n_features
            engine.state.minusminus_ex, engine.state.minusplus_ex = [None] * self.n_features, [None] * self.n_features

        @trainer.on(Events.ITERATION_COMPLETED)
        def log_training_loss(engine):
            writer.add_scalar('General/LR', scheduler.get_param(), global_step=engine.state.iteration)
            _, embedding, loss, multi_losses, y_pred_tensor, y_tensor = engine.state.output
            images_batch, features_batch = engine.state.batch
            plusplus_ex, plusminus_ex = engine.state.plusplus_ex, engine.state.plusminus_ex
            minusminus_ex, minusplus_ex = engine.state.minusminus_ex, engine.state.minusplus_ex
            writer.add_scalar('General/Train Loss', loss, global_step=engine.state.iteration)

            feat_diff = (y_pred_tensor - y_tensor)  # / y_tensor + 1
            feat_sum = y_pred_tensor + y_tensor
            for j in range(self.n_features):
                writer.add_scalar(f'Multiple Losses/{self.feature_names[j]}', multi_losses[j],
                                  global_step=engine.state.iteration)
                # Track extreme (sum, diff) examples per feature for figure logging.
                for i in range(len(images_batch)):
                    itm_diff, itm_sum = feat_diff[i][j].item(), feat_sum[i][j].item()
                    itm_pred, itm_actual = y_pred_tensor[i][j].item(), y_tensor[i][j].item()
                    ex = TrainExample(images_batch[i], predicted=itm_pred, actual=itm_actual, sum=itm_sum,
                                      diff=itm_diff)
                    if minusminus_ex[j] is None or minusminus_ex[j].sum > itm_sum:
                        engine.state.minusminus_ex[j] = ex
                    elif plusminus_ex[j] is None or plusminus_ex[j].diff < itm_diff:
                        engine.state.plusminus_ex[j] = ex
                    elif minusplus_ex[j] is None or minusplus_ex[j].diff > itm_diff:
                        engine.state.minusplus_ex[j] = ex
                    elif plusplus_ex[j] is None or plusplus_ex[j].sum < itm_sum:
                        engine.state.plusplus_ex[j] = ex

        @trainer.on(Events.EPOCH_COMPLETED)
        def log_training_results(engine):
            global_step = engine.state.iteration
            metrics = engine.state.metrics  # already attached to the trainer engine to save
            # can add more metrics here
            add_metrics_to_tensorboard(metrics, writer, self.feature_names, global_step, log_str="train")

            # plot min-max examples
            plusplus_ex, plusminus_ex = engine.state.plusplus_ex, engine.state.plusminus_ex
            minusminus_ex, minusplus_ex = engine.state.minusminus_ex, engine.state.minusplus_ex
            for j in range(self.n_features):
                if plusplus_ex[j] is None:
                    continue
                writer.add_figure(tag=f"{self.feature_names[j]}/plusplus",
                                  figure=build_example_image_figure(plusplus_ex[j]), global_step=global_step)
                writer.add_figure(tag=f"{self.feature_names[j]}/plusminus",
                                  figure=build_example_image_figure(plusminus_ex[j]), global_step=global_step)
                writer.add_figure(tag=f"{self.feature_names[j]}/minusminus",
                                  figure=build_example_image_figure(minusminus_ex[j]), global_step=global_step)
                writer.add_figure(tag=f"{self.feature_names[j]}/minusplus",
                                  figure=build_example_image_figure(minusplus_ex[j]), global_step=global_step)

        @trainer.on(Events.ITERATION_COMPLETED)
        def log_validation_results(engine):
            global_step = engine.state.iteration
            if global_step % evaluate_every == 0:
                evaluator.run(val_data_loader)
                metrics = evaluator.state.metrics
                # can add more metrics here
                add_metrics_to_tensorboard(metrics, writer, self.feature_names, global_step, log_str="validation")
                # add_embedding_visualization(writer, metrics, global_step)
            if global_step % save_every == 0:
                self.save_trained_model(f"{self.model_save_path}/{global_step}_model.pth")

        trainer.run(train_data_loader, max_epochs=epochs)
        return self.model

    def load_trained_model(self, path: str):
        """
        load a trained model

        Args:
            path: path of the saved torch NN

        Returns:
            the trained model in 'path'
        """
        checkpoint = torch.load(path)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.embedding_dim = checkpoint['embedding_dim']
        self.losses = checkpoint['losses']
        self.model = self.model.to(self.device)
        return self

    def _model_to(self):
        # Move the model onto the configured device.
        self.model = self.model.to(self.device)
        # from apex import amp
        # if self.amp:
        #     model, optimizer = amp.initialize(model.to('cuda'), optimizer, opt_level="O1")

    def save_trained_model(self, path: str):
        """
        save a trained model

        Args:
            path: path of the saved torch NN
        """
        # Checkpoint is written from CPU, then the model is moved back.
        self.model = self.model.to('cpu')
        os.makedirs(os.path.dirname(path), exist_ok=True)
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'embedding_dim': self.embedding_dim,
            'losses': self.losses,
        }, path)
        self.model = self.model.to(self.device)

    def transform(self, coords: List[Tuple[float, float]]) -> torch.tensor:
        """
        get the embedding of coordinates

        Args:
            coords: a list of tuple like (lat, long) to predict on

        Returns:
            A tensor of shape [n_coords, embedding_dim]
        """
        # create tiles using the coords (rendered from the tile server)
        s = generate_static_maps(config.tile_server_dns_noport, config.tile_server_ports)
        images = []
        for coord in coords:
            ext = build_tile_extent(coord, radius_in_meters=HALF_TILE_LENGTH)
            image = render_multi_channel(s, ext)
            images.append(image)
        images = torch.tensor(images).float().to(self.device)

        # predict the embedding (head outputs are discarded)
        embeddings, output = self.model(images)
        return embeddings.to('cpu')

    def _build_model(self, cnn_model, n_channels, n_heads):
        # Backbone producing the embedding, plus one simple head per feature.
        model = cnn_model(n_channels, self.embedding_dim)
        # model = simple_cnn(n_channels, self.embedding_dim)
        heads = [simple_head(self.embedding_dim) for _ in range(n_heads)]
        model = multihead_model(model, heads)
        return model
14,934
4,451
from random import choice from time import sleep jokenpo = ['Pedra', 'Papel', 'Tesoura'] jokenposter_stainger = choice(jokenpo) jogador = int(input('Qual a sua jogada?' '\n1. Pedra' '\n2. Papel' '\n3. Tesoura' '\nEscolha: ')) print('\nJO...') sleep(1) print('KEN...') sleep(1) print('PO!!!' '\n ') if jogador == 1: print('Você: Pedra') elif jogador == 2: print('Você: Papel') elif jogador == 3: print('Você: Tesoura') else: print('Escolha uma opção válida.') print('Jokenposter Stainger: {}'.format(jokenposter_stainger)) sleep(2) print('--' * 20) if jokenposter_stainger == 'Pedra' and jogador == 1: print('Empate!') w = 0 elif jokenposter_stainger == 'Pedra' and jogador == 2: print('Você ganhou!') w = 2 elif jokenposter_stainger == 'Pedra' and jogador == 3: print('Você perdeu!') w = 1 elif jokenposter_stainger == 'Papel' and jogador == 1: print('Você perdeu!') w = 1 elif jokenposter_stainger == 'Papel' and jogador == 2: print('Empate!') w = 0 elif jokenposter_stainger == 'Papel' and jogador == 3: print('Você ganhou!') w = 2 elif jokenposter_stainger == 'Tesoura' and jogador == 1: print('Você ganhou!') w = 2 elif jokenposter_stainger == 'Tesoura' and jogador == 2: print('Você perdeu!') w = 1 elif jokenposter_stainger == 'Tesoura' and jogador == 3: print('Empate!') w = 0 if w == 0: print('Jokenposter Stainger: Vamo de novo! Ta com medinho?') elif w == 1: print('Jokenposter Stainger: OTÁRIO ') elif w == 2: print('Jokenposter Stainger: TAAAVA DEMORAANDO! Revanche!') else: print(' ') print('--' * 20)
1,716
718
from django.apps import AppConfig class TextGeneratorConfig(AppConfig): name = 'text_generator'
102
29
#!/usr/bin/python # -*- coding: utf-8 -*- import sys import os import stat protoc_exec = None def find_protoc(): global protoc_exec if protoc_exec is not None: return protoc_exec script_dir = os.path.dirname(os.path.realpath(__file__)) if sys.platform[0:5].lower() == "linux": protoc_exec = os.path.join(script_dir, 'linux_x86_64', 'protoc') elif sys.platform[0:6].lower() == "darwin": protoc_exec = os.path.join(script_dir, 'macos_x86_64', 'protoc') else: protoc_exec = os.path.join(script_dir, 'windows_x86_64', 'protoc.exe') os.chmod(protoc_exec, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO) return protoc_exec """ run as a executable """ if __name__ == "__main__": print(find_protoc())
760
303
from io import BytesIO
import json
import re

import requests
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.core.exceptions import PermissionDenied
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db.models import Count
from django.forms.models import inlineformset_factory
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseRedirect)
from django.shortcuts import get_object_or_404, render
from django.template.loader import render_to_string
from django.urls.base import reverse
from django.utils import timezone
from xhtml2pdf import pisa

from . import forms, models
from events.models import Location
from emails.generators import DefaultLNLEmailGenerator
from pdfs.views import link_callback

NUM_IN_PAGE = 25


def _paginated_page(queryset, page):
    """Return the requested Paginator page of *queryset*, clamping bad input.

    Shared by view_all and cat (previously duplicated in both views).
    """
    paginator = Paginator(queryset, NUM_IN_PAGE)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        return paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        return paginator.page(paginator.num_pages)


@login_required
def view_all(request):
    """ Lists all items in LNL's inventory (no longer maintained - read-only) """
    if not request.user.has_perm('inventory.view_equipment'):
        raise PermissionDenied
    context = {}
    inv = models.EquipmentClass.objects.order_by('name') \
        .annotate(item_count=Count('items'))
    categories = models.EquipmentCategory.objects.all()

    context['inv'] = _paginated_page(inv, request.GET.get('page'))
    context['h2'] = "Inventory: Item List"
    context['cats'] = categories
    return render(request, 'inventory/list.html', context)


@login_required
def cat(request, category_id):
    """
    List items by category

    :param category_id: The primary key value of the equipment category
    """
    if not request.user.has_perm('inventory.view_equipment'):
        raise PermissionDenied
    context = {}
    category = get_object_or_404(models.EquipmentCategory, pk=category_id)
    # "exclusive" limits the listing to this exact category; otherwise all
    # descendant categories are included as well.
    if 'exclusive' in request.GET and request.GET['exclusive']:
        inv = models.EquipmentClass.objects.filter(category=category)
        context['exclusive'] = True
    else:
        inv = models.EquipmentClass.objects.filter(category__in=category.get_descendants_inclusive)
        context['exclusive'] = False
    inv = inv.order_by('category__level', 'category__name', 'name') \
        .annotate(item_count=Count('items'))
    subcategories = models.EquipmentCategory.objects.all()

    context['inv'] = _paginated_page(inv, request.GET.get('page'))
    context['h2'] = "Inventory: %s" % category.name
    context['cat'] = category
    context['cats'] = subcategories
    return render(request, 'inventory/list.html', context)


# Inventory is currently read-only now that we are using Snipe
# @login_required
# def quick_bulk_add(request, type_id):
#     if request.method != 'POST':
#         return HttpResponseBadRequest('Invalid operation')
#     if 'num_to_add' not in request.POST:
#         return HttpResponseBadRequest('Missing parameters')
#
#     try:
#         num_to_add = int(request.POST['num_to_add'])
#     except (ValueError, TypeError):
#         return HttpResponseBadRequest('Bad parameters')
#
#     try:
#         e_type = models.EquipmentClass.objects.get(pk=int(type_id))
#     except models.EquipmentClass.DoesNotExist:
#         return HttpResponseNotFound()
#
#     if not request.user.has_perm('inventory.add_equipmentitem', e_type):
#         raise PermissionDenied
#
#     models.EquipmentItem.objects.bulk_add_helper(e_type, num_to_add)
#
#     messages.add_message(request, messages.SUCCESS,
#                          "%d items added and saved. Now editing."
% num_to_add) # # return HttpResponseRedirect(reverse('inventory:bulk_edit', # kwargs={'type_id': type_id})) # # # @login_required # def quick_bulk_edit(request, type_id): # e_type = get_object_or_404(models.EquipmentClass, pk=int(type_id)) # # if not request.user.has_perm('inventory.change_equipmentitem', e_type): # raise PermissionDenied # # can_delete = request.user.has_perm('inventory.delete_equipmentitem', e_type) # fs_factory = inlineformset_factory(models.EquipmentClass, models.EquipmentItem, # form=forms.EquipmentItemForm, # extra=0, can_delete=can_delete) # # if request.method == 'POST': # formset = fs_factory(request.POST, request.FILES, instance=e_type) # if formset.is_valid(): # formset.save() # messages.add_message(request, messages.SUCCESS, # "Items saved.") # return HttpResponseRedirect(reverse('inventory:type_detail', # kwargs={'type_id': type_id})) # else: # formset = fs_factory(instance=e_type) # qs = models.EquipmentCategory.possible_locations() # for form in formset: # form.fields['home'].queryset = qs # return render(request, "formset_grid.html", { # 'msg': "Bulk inventory edit for '%s'" % e_type.name, # "formset": formset, # 'form_show_errors': True # }) # # # @login_required # def type_edit(request, type_id): # try: # e_type = models.EquipmentClass.objects.get(pk=int(type_id)) # except models.EquipmentClass.DoesNotExist: # return HttpResponseNotFound() # # if not request.user.has_perm('inventory.change_equipmentclass', e_type): # raise PermissionDenied # # if request.method == 'POST': # form = forms.EquipmentClassForm(request.POST, request.FILES, instance=e_type) # if form.is_valid(): # form.save() # messages.add_message(request, messages.SUCCESS, # "Equipment type saved.") # return HttpResponseRedirect(reverse('inventory:type_detail', # kwargs={'type_id': type_id})) # else: # form = forms.EquipmentClassForm(instance=e_type) # return render(request, "form_crispy.html", { # 'msg': "Edit '%s'" % e_type.name, # "form": form, # }) # # # 
@login_required # def type_mk(request): # if not request.user.has_perm('inventory.add_equipmentclass'): # raise PermissionDenied # # category = request.GET.get('default_cat') # # if request.method == 'POST': # form = forms.EquipmentClassForm(request.POST, request.FILES) # if form.is_valid(): # obj = form.save() # messages.add_message(request, messages.SUCCESS, # "Equipment type added.") # return HttpResponseRedirect(reverse('inventory:type_detail', # kwargs={'type_id': obj.pk})) # else: # form = forms.EquipmentClassForm(initial={'category': category}) # return render(request, "form_crispy.html", { # 'msg': "Create Equipment Type", # "form": form, # }) # # # @login_required # def type_rm(request, type_id): # obj = get_object_or_404(models.EquipmentClass, pk=int(type_id)) # return_page = reverse('inventory:cat', args=[obj.category.pk]) # # if not request.user.has_perm('inventory.delete_equipmentclass', obj): # raise PermissionDenied # # if request.method == 'POST': # if obj.items.exists(): # return HttpResponseBadRequest("There are still items of this type") # else: # obj.delete() # return HttpResponseRedirect(return_page) # else: # return HttpResponseBadRequest("Bad method") # # # @login_required # def cat_edit(request, category_id): # category = get_object_or_404(models.EquipmentCategory, pk=category_id) # # if not request.user.has_perm('inventory.change_equipmentcategory', category): # raise PermissionDenied # # if request.method == 'POST': # form = forms.CategoryForm(request.POST, request.FILES, instance=category) # if form.is_valid(): # form.save() # messages.add_message(request, messages.SUCCESS, # "Category saved.") # return HttpResponseRedirect(reverse('inventory:cat', # kwargs={'category_id': category_id})) # else: # form = forms.CategoryForm(instance=category) # return render(request, "form_crispy.html", { # 'msg': "Edit Category", # "form": form, # }) # # # @login_required # def cat_mk(request): # if not 
request.user.has_perm('inventory.add_equipmentcategory'): # raise PermissionDenied # # parent = request.GET.get('parent') # # if request.method == 'POST': # form = forms.CategoryForm(request.POST, request.FILES) # if form.is_valid(): # obj = form.save() # messages.add_message(request, messages.SUCCESS, # "Category added.") # return HttpResponseRedirect(reverse('inventory:cat', # kwargs={'category_id': obj.pk})) # else: # form = forms.CategoryForm(initial={'parent': parent}) # return render(request, "form_crispy.html", { # 'msg': "Create Category", # "form": form, # }) # # # @login_required # def cat_rm(request, category_id): # ecat = get_object_or_404(models.EquipmentCategory, pk=int(category_id)) # if ecat.parent: # return_url = reverse('inventory:cat', args=[ecat.parent.pk]) # else: # return_url = reverse('inventory:view_all') # # if not request.user.has_perm('inventory.delete_equipmentcategory', ecat): # raise PermissionDenied # # if request.method == 'POST': # if ecat.get_children().exists(): # return HttpResponseBadRequest("There are still subcategories of this type") # elif ecat.equipmentclass_set.exists(): # return HttpResponseBadRequest("There are still items in this category") # else: # ecat.delete() # return HttpResponseRedirect(return_url) # else: # return HttpResponseBadRequest("Bad method") # # # @login_required # def fast_mk(request): # if not request.user.has_perm('inventory.add_equipmentitem'): # raise PermissionDenied # # try: # category = int(request.GET['default_cat']) # except (ValueError, KeyError, TypeError): # category = None # # if request.method == 'POST': # form = forms.FastAdd(request.user, request.POST, request.FILES) # if form.is_valid(): # obj = form.save() # messages.add_message(request, messages.SUCCESS, # "%d items added and saved. Now editing." 
% form.cleaned_data['num_to_add']) # return HttpResponseRedirect(reverse('inventory:bulk_edit', # kwargs={'type_id': obj.pk})) # else: # form = forms.FastAdd(request.user, initial={'item_cat': category}) # return render(request, "form_crispy.html", { # 'msg': "Fast Add Item(s)", # "form": form, # }) @login_required def type_detail(request, type_id): """ Detail page for a group of items """ e = get_object_or_404(models.EquipmentClass, pk=type_id) return render(request, 'inventory/type_detail.html', { 'breadcrumbs': e.breadcrumbs, 'equipment': e }) @login_required def item_detail(request, item_id): """ Detail page for a specific item """ item = get_object_or_404(models.EquipmentItem, pk=item_id) return render(request, 'inventory/item_detail.html', { 'breadcrumbs': item.breadcrumbs, 'item': item }) # @login_required # def item_edit(request, item_id): # try: # item = models.EquipmentItem.objects.get(pk=int(item_id)) # except models.EquipmentItem.DoesNotExist: # return HttpResponseNotFound() # # if not request.user.has_perm('inventory.change_equipmentitem', item): # raise PermissionDenied # # if request.method == 'POST': # form = forms.EquipmentItemForm(request.POST, request.FILES, instance=item) # if form.is_valid(): # form.save() # messages.add_message(request, messages.SUCCESS, # "Item saved.") # return HttpResponseRedirect(reverse('inventory:item_detail', # kwargs={'item_id': item_id})) # else: # form = forms.EquipmentItemForm(instance=item) # return render(request, "form_crispy.html", { # 'msg': "Edit '%s'" % str(item), # "form": form, # }) # # # @login_required # def item_rm(request, item_id): # obj = get_object_or_404(models.EquipmentItem, pk=int(item_id)) # return_page = reverse('inventory:type_detail', args=[obj.item_type.pk]) # # if not request.user.has_perm('inventory.delete_equipmentitem', obj): # raise PermissionDenied # # if request.method == 'POST': # if obj.unsafe_to_delete: # return HttpResponseBadRequest("There are still items of this type") # else: # 
obj.delete() # return HttpResponseRedirect(return_page) # else: # return HttpResponseBadRequest("Bad method") @login_required @permission_required('inventory.view_equipment', raise_exception=True) def snipe_checkout(request): """ Equipment inventory checkout form. Communicates with Snipe via their API. """ if not settings.SNIPE_URL: return HttpResponse('This page is unavailable because SNIPE_URL is not set.', status=501) if not settings.SNIPE_API_KEY: return HttpResponse('This page is unavailable because SNIPE_API_KEY is not set.', status=501) # Get the list of users in the rental group from Snipe error_message = 'Error communicating with Snipe. Did not check out anything.' checkout_to_choices = [] response = requests.request('GET', '{}api/v1/users'.format(settings.SNIPE_URL), headers={ 'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json' }) if response.status_code == 200: try: data = json.loads(response.text) if data.get('status') == 'error': return HttpResponse(error_message, status=502) checkout_to_choices = [(user['id'], user['name']) for user in data['rows'] if 'rental' in ((group['name'] for group in user['groups']['rows']) if user['groups'] is not None else ())] except ValueError: return HttpResponse(error_message, status=502) else: return HttpResponse(error_message, status=502) # Handle the form error_message = 'Error communicating with Snipe. Some things may have been checked out while some were not. ' \ 'Please go check Snipe.' 
if request.method == 'POST': receipt_info = {} form = forms.SnipeCheckoutForm(checkout_to_choices, request.POST, request.FILES) if form.is_valid(): success_count_assets = 0 success_count_accessories = 0 for tag in [tag for tag in re.split('[^a-zA-Z0-9]', form.cleaned_data['asset_tags']) if tag]: match = re.match('LNLACC([0-9]+)', tag) if match: tag = match.group(1) # This tag represents an accessory response = requests.request('GET', '{}api/v1/accessories/{}'.format(settings.SNIPE_URL, tag), headers={'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json'}) if response.status_code == 200: try: data = json.loads(response.text) if data.get('status') == 'error': # No accessory with that ID exists in Snipe messages.add_message(request, messages.ERROR, 'No such accessory with ID {}'.format(tag)) continue accessory_name = data['name'] rental_price = float(data['order_number']) if data['order_number'] is not None else None # Check out the accessory response = requests.request('POST', '{}api/v1/accessories/{}/checkout'.format(settings.SNIPE_URL, tag), data=json.dumps({ 'assigned_to': form.cleaned_data['checkout_to'], }), headers={ 'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json', }) if response.status_code == 200: data = json.loads(response.text) if data.get('status') == 'error': # Snipe refused to check out the accessory (maybe they are all checked out) messages.add_message(request, messages.ERROR, 'Unable to check out accessory {}. 
Snipe says: {}'.format(tag, data['messages'])) continue # The accessory was successfully checked out success_count_accessories += 1 if tag in receipt_info: if receipt_info[tag]['name'] != accessory_name \ or receipt_info[tag]['rental_price'] != rental_price: return HttpResponse(error_message, status=502) receipt_info[tag]['quantity'] += 1 else: receipt_info[tag] = {'name': accessory_name, 'rental_price': rental_price, 'quantity': 1} else: return HttpResponse(error_message, status=502) except ValueError: return HttpResponse(error_message, status=502) else: return HttpResponse(error_message, status=502) else: # This tag represents an asset response = requests.request('GET', '{}api/v1/hardware/bytag/{}'.format(settings.SNIPE_URL, tag), headers={'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json'}) if response.status_code == 200: try: data = json.loads(response.text) if data.get('status') == 'error': # The asset tag does not exist in Snipe messages.add_message(request, messages.ERROR, 'No such asset tag {}'.format(tag)) continue asset_name = data['name'] if 'custom_fields' in data and 'Rental Price' in data['custom_fields'] and \ 'value' in data['custom_fields']['Rental Price'] and data['custom_fields']['Rental Price']['value'] is not None: rental_price = float(data['custom_fields']['Rental Price']['value']) else: rental_price = None # Check out the asset response = requests.request('POST', '{}api/v1/hardware/{}/checkout'.format(settings.SNIPE_URL, data['id']), data=json.dumps({ 'checkout_to_type': 'user', 'assigned_user': form.cleaned_data['checkout_to'], }), headers={ 'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json', }) if response.status_code == 200: data = json.loads(response.text) if data.get('status') == 'error': # Snipe refused to check out the asset (maybe it is already checked out) messages.add_message(request, 
messages.ERROR, 'Unable to check out asset {} - {}. Snipe says: {}'.format(tag, asset_name, data['messages'])) continue # The asset was successfully checked out success_count_assets += 1 if tag in receipt_info: return HttpResponse(error_message, status=502) receipt_info[tag] = {'name': asset_name, 'rental_price': rental_price, 'quantity': 1} else: return HttpResponse(error_message, status=502) except ValueError: return HttpResponse(error_message, status=502) else: return HttpResponse(error_message, status=502) if success_count_assets > 0 or success_count_accessories > 0: messages.add_message(request, messages.SUCCESS, 'Successfully checked out {} assets and {} accessories'.format(success_count_assets, success_count_accessories)) rental_prices = [(None if asset_info['rental_price'] is None else asset_info['rental_price'] * asset_info['quantity']) for asset_info in receipt_info.values()] total_rental_price = None if None in rental_prices else sum(rental_prices) checkout_to_name = next((item[1] for item in checkout_to_choices if item[0] == form.cleaned_data['checkout_to'])) # Before returning the response, email a PDF receipt html = render_to_string('pdf_templates/checkout_receipt.html', request=request, context={ 'title': 'Checkout Receipt', 'receipt_info': receipt_info, 'num_assets': success_count_assets, 'num_accessories': success_count_accessories, 'total_rental_price': total_rental_price, 'checkout_to': checkout_to_name, }) pdf_file = BytesIO() pisa.CreatePDF(html, dest=pdf_file, link_callback=link_callback) pdf_handle = pdf_file.getvalue() filename = 'LNL-checkout-receipt-{}.pdf'.format(timezone.now().isoformat()) attachments = [{'file_handle': pdf_handle, 'name': filename}] email = DefaultLNLEmailGenerator(subject='LNL Inventory Checkout Receipt', to_emails=(request.user.email, settings.EMAIL_TARGET_RENTALS), attachments=attachments, body='A receipt for the rental checkout by {} to {} is attached.'.format(request.user, checkout_to_name)) email.send() # Return 
the response return render(request, 'inventory/checkout_receipt.html', { 'receipt_info': receipt_info, 'num_assets': success_count_assets, 'num_accessories': success_count_accessories, 'total_rental_price': total_rental_price, 'checkout_to': form.cleaned_data['checkout_to'], 'checkout_to_name': checkout_to_name, }) else: form = forms.SnipeCheckoutForm(checkout_to_choices, initial={'checkout_to': form.cleaned_data['checkout_to']}) else: if 'checkout_to' in request.GET: form = forms.SnipeCheckoutForm(checkout_to_choices, initial={'checkout_to': request.GET['checkout_to']}) else: form = forms.SnipeCheckoutForm(checkout_to_choices) return render(request, "form_crispy.html", { 'msg': 'Inventory checkout', 'form': form, }) @login_required @permission_required('inventory.view_equipment', raise_exception=True) def snipe_checkin(request): """ Equipment inventory checkin form. Communicates with Snipe via their API. """ if not settings.SNIPE_URL: return HttpResponse('This page is unavailable because SNIPE_URL is not set.', status=501) if not settings.SNIPE_API_KEY: return HttpResponse('This page is unavailable because SNIPE_API_KEY is not set.', status=501) # Get the list of users in the rental group from Snipe error_message = 'Error communicating with Snipe. Did not check in anything.' 
checkin_from_choices = [] response = requests.request('GET', '{}api/v1/users'.format(settings.SNIPE_URL), headers={ 'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json' }) if response.status_code == 200: try: data = json.loads(response.text) if data.get('status') == 'error': return HttpResponse(error_message, status=502) checkin_from_choices = [(user['id'], user['name']) for user in data['rows'] if 'rental' in ((group['name'] for group in user['groups']['rows']) if user['groups'] is not None else ())] checkin_from_usernames = {user['id']: user['username'] for user in data['rows'] if 'rental' in ((group['name'] for group in user['groups']['rows']) if user['groups'] is not None else ())} except ValueError: return HttpResponse(error_message, status=502) else: return HttpResponse(error_message, status=502) # Handle the form error_message = 'Error communicating with Snipe. Some things may have been checked in while some were not. Please go check Snipe.' 
if request.method == 'POST': form = forms.SnipeCheckinForm(checkin_from_choices, request.POST, request.FILES) if form.is_valid(): receipt_info = {} receipt_info_extra = {} checkin_from_name = next((item[1] for item in checkin_from_choices if item[0] == form.cleaned_data['checkin_from'])) checkin_from_username = checkin_from_usernames[form.cleaned_data['checkin_from']] success_count_assets = 0 success_count_accessories = 0 extra_count_assets = 0 extra_count_accessories = 0 for tag in [tag for tag in re.split('[^a-zA-Z0-9]', form.cleaned_data['asset_tags']) if tag]: match = re.match('LNLACC([0-9]+)', tag) if match: tag = match.group(1) # This tag represents an accessory response = requests.request('GET', '{}api/v1/accessories/{}'.format(settings.SNIPE_URL, tag), headers={ 'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json' }) if response.status_code == 200: try: data = json.loads(response.text) if data.get('status') == 'error': # No accessory with that ID exists in Snipe messages.add_message(request, messages.ERROR, 'No such accessory with ID {}'.format(tag)) continue accessory_name = data['name'] rental_price = float(data['order_number']) if data['order_number'] is not None else None # Get the list of checked out instances of the accessory response = requests.request('GET', '{}api/v1/accessories/{}/checkedout'.format(settings.SNIPE_URL, tag), headers={ 'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json' }) if response.status_code == 200: data = json.loads(response.text) if data.get('status') == 'error': return HttpResponse(error_message, status=502) accessory_instances = [a for a in data['rows'] if a['username'] == checkin_from_username] if len(accessory_instances) == 0: # There are no instances of that accessory checked out to the specified Snipe user messages.add_message(request, messages.ERROR, 'No instance of {} 
checked out to {}'.format(accessory_name, checkin_from_name)) extra_count_accessories += 1 if tag in receipt_info_extra: if receipt_info_extra[tag]['name'] != accessory_name \ or receipt_info_extra[tag]['rental_price'] != rental_price: return HttpResponse(error_message, status=502) receipt_info_extra[tag]['quantity'] += 1 else: receipt_info_extra[tag] = {'name': accessory_name, 'rental_price': rental_price, 'quantity': 1} continue # Check in the accessory response = requests.request('POST', '{}api/v1/accessories/{}/checkin'.format(settings.SNIPE_URL, accessory_instances[0]['assigned_pivot_id']), headers={ 'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json', }) if response.status_code == 200: data = json.loads(response.text) if data.get('status') == 'error': # Snipe refused to check in the accessory messages.add_message(request, messages.ERROR, 'Unable to check in accessory {}. Snipe says: {}'.format(tag, data['messages'])) continue # The accessory was successfully checked in success_count_accessories += 1 if tag in receipt_info: if receipt_info[tag]['name'] != accessory_name \ or receipt_info[tag]['rental_price'] != rental_price: return HttpResponse(error_message, status=502) receipt_info[tag]['quantity'] += 1 else: receipt_info[tag] = {'name': accessory_name, 'rental_price': rental_price, 'quantity': 1} else: return HttpResponse(error_message, status=502) else: return HttpResponse(error_message, status=502) except ValueError: return HttpResponse(error_message, status=502) else: return HttpResponse(error_message, status=502) else: # This tag represents an asset response = requests.request('GET', '{}api/v1/hardware/bytag/{}'.format(settings.SNIPE_URL, tag), headers={ 'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json' }) if response.status_code == 200: try: data = json.loads(response.text) if data.get('status') == 
'error': # The asset tag does not exist in Snipe messages.add_message(request, messages.ERROR, 'No such asset tag {}'.format(tag)) continue asset_name = data['name'] if 'custom_fields' in data and 'Rental Price' in data['custom_fields'] and \ 'value' in data['custom_fields']['Rental Price'] and data['custom_fields']['Rental Price']['value'] is not None: rental_price = float(data['custom_fields']['Rental Price']['value']) else: rental_price = None if ('assigned_to' not in data or data['assigned_to'] is None or 'type' not in data['assigned_to'] or data['assigned_to']['type'] != 'user' or 'id' not in data['assigned_to'] or data['assigned_to']['id'] != form.cleaned_data['checkin_from']): # That asset is not checked out to the specified Snipe user messages.add_message(request, messages.ERROR, 'Asset {} was never checked out to {}'.format(asset_name, checkin_from_name)) extra_count_assets += 1 if tag in receipt_info: return HttpResponse(error_message, status=502) receipt_info_extra[tag] = {'name': asset_name, 'rental_price': rental_price, 'quantity': 1} continue # Check in the asset response = requests.request('POST', '{}api/v1/hardware/{}/checkin'.format(settings.SNIPE_URL, data['id']), headers={ 'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY), 'accept': 'application/json', 'content-type': 'application/json', }) if response.status_code == 200: data = json.loads(response.text) if data.get('status') == 'error': # Snipe refused to check in the asset messages.add_message(request, messages.ERROR, 'Unable to check in asset {} - {}. 
Snipe says: {}'.format(tag, asset_name, data['messages'])) continue # The asset was successfully checked in success_count_assets += 1 if tag in receipt_info: return HttpResponse(error_message, status=502) receipt_info[tag] = {'name': asset_name, 'rental_price': rental_price, 'quantity': 1} else: return HttpResponse(error_message, status=502) except ValueError: return HttpResponse(error_message, status=502) else: return HttpResponse(error_message, status=502) if success_count_assets > 0 or success_count_accessories > 0: messages.add_message(request, messages.SUCCESS, 'Successfully checked in {} assets and {} accessories'.format(success_count_assets, success_count_accessories)) rental_prices = [(None if asset_info['rental_price'] is None else asset_info['rental_price'] * asset_info['quantity']) for asset_info in receipt_info.values()] extra_prices = [(None if asset_info['rental_price'] is None else asset_info['rental_price'] * asset_info['quantity']) for asset_info in receipt_info_extra.values()] total_rental_price = None if None in rental_prices or None in extra_prices else sum(rental_prices) + sum(extra_prices) # Before returning the response, email a PDF receipt html = render_to_string('pdf_templates/checkin_receipt.html', request=request, context={ 'title': 'Checkin Receipt', 'receipt_info': receipt_info, 'receipt_info_extra': receipt_info_extra, 'num_assets': success_count_assets, 'num_accessories': success_count_accessories, 'num_extra_assets': extra_count_assets, 'num_extra_accessories': extra_count_accessories, 'total_rental_price': total_rental_price, 'checkin_from': checkin_from_name, }) pdf_file = BytesIO() pisa.CreatePDF(html, dest=pdf_file, link_callback=link_callback) pdf_handle = pdf_file.getvalue() filename = 'LNL-checkin-receipt-{}.pdf'.format(timezone.now().isoformat()) attachments = [{'file_handle': pdf_handle, 'name': filename}] email = DefaultLNLEmailGenerator(subject='LNL Inventory Checkin Receipt', to_emails=(request.user.email, 
settings.EMAIL_TARGET_RENTALS), attachments=attachments, body='A receipt for the rental checkin by {} from {} is attached.'.format(request.user, checkin_from_name)) email.send() # Return the response return render(request, 'inventory/checkin_receipt.html', { 'receipt_info': receipt_info, 'receipt_info_extra': receipt_info_extra, 'num_assets': success_count_assets, 'num_accessories': success_count_accessories, 'num_extra_assets': extra_count_assets, 'num_extra_accessories': extra_count_accessories, 'total_rental_price': total_rental_price, 'checkin_from': form.cleaned_data['checkin_from'], 'checkin_from_name': checkin_from_name, }) else: form = forms.SnipeCheckinForm(checkin_from_choices, initial={'checkin_from': form.cleaned_data['checkin_from']}) else: if 'checkin_from' in request.GET: form = forms.SnipeCheckinForm(checkin_from_choices, initial={'checkin_from': request.GET['checkin_from']}) else: form = forms.SnipeCheckinForm(checkin_from_choices) return render(request, "form_crispy.html", { 'msg': 'Inventory checkin', 'form': form, }) @login_required @permission_required('inventory.view_equipment', raise_exception=True) def snipe_credentials(request): context = { 'title': 'Snipe Login Credentials', 'message': '<span style="font-size: 1.3em"><strong>Username:</strong> ' + settings.SNIPE_GENERAL_USER + '<br><strong>Password:</strong> ' + settings.SNIPE_GENERAL_PASS + '</span><br><br>' '<a class="btn btn-primary" href="https://lnl-rt.wpi.edu/snipe" target="_blank">Login Now</a>' } return render(request, 'default.html', context) @login_required def log_access(request, location=None, reason=None): """ Checkin form used by LNL members when accessing a storage location (contact tracing) :param location: The name of the location (must match a location that contains equipment) :param reason: Should be set to "OUT" if user is checking out of a location (None otherwise) """ context = {'NO_FOOT': True, 'NO_NAV': True, 'NO_API': True, 'LIGHT_THEME': True} location = 
location.replace('-', ' ') space = Location.objects.filter(holds_equipment=True, name__icontains=location).first() if not space: return HttpResponseNotFound("Invalid Location ID") if request.method == 'POST': form = forms.AccessForm(request.POST, location=space.name, reason=reason, initial={'users': [request.user]}) if form.is_valid(): record = form.save(commit=False) record.location = space record.save() form.save_m2m() if reason == "OUT": messages.success(request, "Thank you! Come again soon!", extra_tags="success") else: messages.success(request, "Thank you! You are now signed in.", extra_tags="success") return HttpResponseRedirect(reverse("home")) else: form = forms.AccessForm(location=space.name, reason=reason, initial={'users': [request.user]}) context['form'] = form return render(request, 'form_crispy_static.html', context) @login_required @permission_required('inventory.view_access_logs', raise_exception=True) def view_logs(request): """ View contact tracing logs for LNL storage spaces """ headers = ['Timestamp', 'User', 'Location', 'Reason'] def get_timestamp(data): return data.get('timestamp') records = [] for record in models.AccessRecord.objects.all(): for user in record.users.all(): obj = {'timestamp': record.timestamp, 'user': user, 'location': record.location, 'reason': record.reason} records.append(obj) records.sort(key=get_timestamp, reverse=True) paginator = Paginator(records, 50) page_number = request.GET.get('page', 1) current_page = paginator.get_page(page_number) context = {'records': current_page, 'title': 'Access Log', 'headers': headers} return render(request, 'access_log.html', context)
43,253
11,723
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=unused-argument, import-outside-toplevel, protected-access import re from datetime import datetime from flask.ctx import AppContext from superset.errors import ErrorLevel, SupersetError, SupersetErrorType from tests.unit_tests.fixtures.common import dttm SYNTAX_ERROR_REGEX = re.compile( ": mismatched input '(?P<syntax_error>.*?)'. Expecting: " ) def test_convert_dttm(app_context: AppContext, dttm: datetime) -> None: """ Test that date objects are converted correctly. """ from superset.db_engine_specs.athena import AthenaEngineSpec assert AthenaEngineSpec.convert_dttm("DATE", dttm) == "DATE '2019-01-02'" assert ( AthenaEngineSpec.convert_dttm("TIMESTAMP", dttm) == "TIMESTAMP '2019-01-02 03:04:05.678'" ) def test_extract_errors(app_context: AppContext) -> None: """ Test that custom error messages are extracted correctly. """ from superset.db_engine_specs.athena import AthenaEngineSpec msg = ": mismatched input 'fromm'. Expecting: " result = AthenaEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( message='Please check your query for syntax errors at or near "fromm". 
Then, try running your query again.', error_type=SupersetErrorType.SYNTAX_ERROR, level=ErrorLevel.ERROR, extra={ "engine_name": "Amazon Athena", "issue_codes": [ { "code": 1030, "message": "Issue 1030 - The query has a syntax error.", } ], }, ) ] def test_get_text_clause_with_colon(app_context: AppContext) -> None: """ Make sure text clauses don't escape the colon character """ from superset.db_engine_specs.athena import AthenaEngineSpec query = ( "SELECT foo FROM tbl WHERE " "abc >= TIMESTAMP '2021-11-26T00\:00\:00.000000'" ) text_clause = AthenaEngineSpec.get_text_clause(query) assert text_clause.text == query
2,877
894
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*- import pytest from django.test import RequestFactory from django.urls import reverse from doubles import allow, expect from rest_framework import status from apps.accounts.models.choices import ActionCategory from apps.accounts.services.auth import AuthService from apps.accounts.tests.factories.pending_action import PendingActionFactory from apps.accounts.views.confirm_email import ConfirmEmailView @pytest.mark.django_db class ConfirmEmailTests: @classmethod def make_confirm_email_url(cls, token): return reverse( 'accounts:confirm-email', kwargs={'token': token} ) def test_get_with_valid_token(self, api_client): pending_action = PendingActionFactory(category=ActionCategory.CONFIRM_EMAIL.value) allow(AuthService).confirm_email.and_return(True) expect(AuthService).confirm_email.once() response = api_client.get(self.make_confirm_email_url(pending_action.token)) assert response.status_code == status.HTTP_200_OK def test_get_without_invalid_token(self, api_client): allow(AuthService).confirm_email.and_return(True) expect(AuthService).confirm_email.never() response = api_client.get(self.make_confirm_email_url('invalid_token')) assert response.status_code == status.HTTP_200_OK
1,376
413
# Integration tests for the DB-backed WorkQueue (data.queue): claiming,
# processing-window extension, metrics gauges, namespacing, and cancellation.
import json
import time

import pytest

from contextlib import contextmanager
from datetime import datetime, timedelta
from functools import wraps

from data.database import QueueItem
from data.queue import (
    WorkQueue,
    MINIMUM_EXTENSION,
    queue_items_locked,
    queue_items_available,
    queue_items_available_unlocked,
)
from test.fixtures import *

QUEUE_NAME = "testqueuename"


class AutoUpdatingQueue(object):
    """Proxy around a WorkQueue that refreshes its Prometheus metrics after
    every method call, so gauge assertions in the tests stay current."""

    def __init__(self, queue_to_wrap):
        self._queue = queue_to_wrap

    def _wrapper(self, func):
        # Wrap a queue method so update_metrics() runs after it returns.
        @wraps(func)
        def wrapper(*args, **kwargs):
            to_return = func(*args, **kwargs)
            self._queue.update_metrics()
            return to_return

        return wrapper

    def __getattr__(self, attr_name):
        # Delegate everything to the wrapped queue; only callables get the
        # metrics-refresh wrapper.
        method_or_attr = getattr(self._queue, attr_name)
        if callable(method_or_attr):
            return self._wrapper(method_or_attr)
        else:
            return method_or_attr


TEST_MESSAGE_1 = json.dumps({"data": 1})
TEST_MESSAGE_2 = json.dumps({"data": 2})
TEST_MESSAGES = [json.dumps({"data": str(i)}) for i in range(1, 101)]


@contextmanager
def fake_transaction(arg):
    """No-op transaction context so tests run without real DB transactions."""
    yield


@pytest.fixture()
def transaction_factory():
    return fake_transaction


def gauge_value(g):
    """Read the current value of a Prometheus gauge."""
    return g.collect()[0].samples[0].value


@pytest.fixture()
def queue(transaction_factory, initialized_db):
    # Fresh metrics-refreshing queue bound to the test database.
    return AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory))


def test_get_single_item(queue, transaction_factory):
    """Only one of two competing workers can claim the same item."""
    # Add a single item to the queue.
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)

    # Have two "instances" retrieve an item to claim. Since there is only one, both calls should
    # return the same item.
    # NOTE(review): datetime.utcnow() is naive; presumably the queue stores
    # naive UTC timestamps throughout — confirm in data.queue.
    now = datetime.utcnow()
    first_item = queue._select_available_item(False, now)
    second_item = queue._select_available_item(False, now)

    assert first_item.id == second_item.id
    assert first_item.state_id == second_item.state_id

    # Have both "instances" now try to claim the item. Only one should succeed.
    first_claimed = queue._attempt_to_claim_item(first_item, now, 300)
    second_claimed = queue._attempt_to_claim_item(first_item, now, 300)

    assert first_claimed
    assert not second_claimed

    # Ensure the item is no longer available.
    assert queue.get() is None

    # Ensure the item's state ID has changed.
    assert first_item.state_id != QueueItem.get().state_id


def test_extend_processing(queue, transaction_factory):
    """extend_processing only persists when past MINIMUM_EXTENSION, unless
    new data is attached."""
    # Add and retrieve a queue item.
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    queue_item = queue.get(processing_time=10)
    assert queue_item is not None

    existing_db_item = QueueItem.get(id=queue_item.id)

    # Call extend processing with a timedelta less than the minimum and ensure its
    # processing_expires and state_id do not change.
    changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1)
    assert not changed

    updated_db_item = QueueItem.get(id=queue_item.id)

    assert existing_db_item.processing_expires == updated_db_item.processing_expires
    assert existing_db_item.state_id == updated_db_item.state_id

    # Call extend processing with a timedelta greater than the minimum and ensure its
    # processing_expires and state_id are changed.
    changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() + 1)
    assert changed

    updated_db_item = QueueItem.get(id=queue_item.id)

    assert existing_db_item.processing_expires != updated_db_item.processing_expires
    assert existing_db_item.state_id != updated_db_item.state_id

    # Call extend processing with a timedelta less than the minimum but also with new data and
    # ensure its processing_expires and state_id are changed.
    changed = queue.extend_processing(
        queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1, updated_data="newbody"
    )
    assert changed

    updated_db_item = QueueItem.get(id=queue_item.id)

    assert existing_db_item.processing_expires != updated_db_item.processing_expires
    assert existing_db_item.state_id != updated_db_item.state_id
    assert updated_db_item.body == "newbody"


def test_same_canonical_names(queue, transaction_factory):
    """With ordering required, items sharing a canonical name are served
    strictly one at a time, tracked by the locked/available gauges."""
    # Reset gauges so assertions below start from a known baseline.
    queue_items_locked.labels(queue._queue_name).set(0)
    queue_items_available.labels(queue._queue_name).set(0)
    queue_items_available_unlocked.labels(queue._queue_name).set(0)

    id_1 = int(queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1))
    id_2 = int(queue.put(["abc", "def"], TEST_MESSAGE_2, available_after=-1))
    assert id_1 + 1 == id_2

    assert not queue._currently_processing
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    one = queue.get(ordering_required=True)
    assert one is not None
    assert one.body == TEST_MESSAGE_1
    assert queue._currently_processing
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    # Second fetch fails while the first item with the same name is locked.
    two_fail = queue.get(ordering_required=True)
    assert two_fail is None
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    queue.complete(one)
    assert not queue._currently_processing
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    two = queue.get(ordering_required=True)
    assert two is not None
    assert queue._currently_processing
    assert two.body == TEST_MESSAGE_2
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1


def test_different_canonical_names(queue, transaction_factory):
    """Items with distinct canonical names can be processed concurrently."""
    queue_items_locked.labels(queue._queue_name).set(0)
    queue_items_available.labels(queue._queue_name).set(0)
    queue_items_available_unlocked.labels(queue._queue_name).set(0)

    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    queue.put(["abc", "ghi"], TEST_MESSAGE_2, available_after=-1)

    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2

    one = queue.get(ordering_required=True)
    assert one is not None
    assert one.body == TEST_MESSAGE_1
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2

    two = queue.get(ordering_required=True)
    assert two is not None
    assert two.body == TEST_MESSAGE_2
    assert gauge_value(queue_items_locked) == 2
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2


def test_canonical_name(queue, transaction_factory):
    """Returned items are QueueItem objects, not canonical-name strings."""
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    queue.put(["abc", "def", "ghi"], TEST_MESSAGE_1, available_after=-1)

    # NOTE(review): comparing a string to a QueueItem is always unequal, so
    # these assertions only prove get() didn't return the raw name strings.
    one = queue.get(ordering_required=True)
    assert QUEUE_NAME + "/abc/def/" != one

    two = queue.get(ordering_required=True)
    assert QUEUE_NAME + "/abc/def/ghi/" != two


def test_expiration(queue, transaction_factory):
    """An item whose processing window lapses becomes claimable again."""
    queue_items_locked.labels(queue._queue_name).set(0)
    queue_items_available.labels(queue._queue_name).set(0)
    queue_items_available_unlocked.labels(queue._queue_name).set(0)

    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    # Claim with a very short (0.5s) processing window.
    one = queue.get(processing_time=0.5, ordering_required=True)
    assert one is not None
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    one_fail = queue.get(ordering_required=True)
    assert one_fail is None

    # Let the processing window expire, then refresh the gauges.
    time.sleep(1)
    queue.update_metrics()
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    one_again = queue.get(ordering_required=True)
    assert one_again is not None
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1


def test_alive(queue, transaction_factory):
    """alive() reflects the item's full lifecycle: absent, queued,
    processing, completed."""
    # No queue item = not alive.
    assert not queue.alive(["abc", "def"])

    # Add a queue item.
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    assert queue.alive(["abc", "def"])

    # Retrieve the queue item.
    queue_item = queue.get()
    assert queue_item is not None
    assert queue.alive(["abc", "def"])

    # Make sure it is running by trying to retrieve it again.
    assert queue.get() is None

    # Delete the queue item.
    queue.complete(queue_item)
    assert not queue.alive(["abc", "def"])


def test_specialized_queue(queue, transaction_factory):
    """A queue restricted to a name prefix only sees matching items."""
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    queue.put(["def", "def"], TEST_MESSAGE_2, available_after=-1)

    my_queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, ["def"]))

    two = my_queue.get(ordering_required=True)
    assert two is not None
    assert two.body == TEST_MESSAGE_2

    one_fail = my_queue.get(ordering_required=True)
    assert one_fail is None

    # The unrestricted queue still serves the non-matching item.
    one = queue.get(ordering_required=True)
    assert one is not None
    assert one.body == TEST_MESSAGE_1


def test_random_queue_no_duplicates(queue, transaction_factory):
    """100 gets return each of the 100 queued messages exactly once."""
    for msg in TEST_MESSAGES:
        queue.put(["abc", "def"], msg, available_after=-1)

    seen = set()

    for _ in range(1, 101):
        item = queue.get()
        json_body = json.loads(item.body)
        msg = str(json_body["data"])
        assert msg not in seen
        seen.add(msg)

    for body in TEST_MESSAGES:
        json_body = json.loads(body)
        msg = str(json_body["data"])
        assert msg in seen


def test_bulk_insert(queue, transaction_factory):
    """batch_insert adds items in bulk; available count reflects distinct
    canonical names (duplicates collapse to one available)."""
    queue_items_locked.labels(queue._queue_name).set(0)
    queue_items_available.labels(queue._queue_name).set(0)
    queue_items_available_unlocked.labels(queue._queue_name).set(0)

    with queue.batch_insert() as queue_put:
        queue_put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
        queue_put(["abc", "def"], TEST_MESSAGE_2, available_after=-1)

    queue.update_metrics()
    assert not queue._currently_processing
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    with queue.batch_insert() as queue_put:
        queue_put(["abd", "def"], TEST_MESSAGE_1, available_after=-1)
        queue_put(["abd", "ghi"], TEST_MESSAGE_2, available_after=-1)

    queue.update_metrics()
    assert not queue._currently_processing
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 3


def test_num_available_between(queue, transaction_factory):
    """num_available_jobs_between filters by availability window and name
    prefix list."""
    now = datetime.utcnow()
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-10)
    queue.put(["abc", "ghi"], TEST_MESSAGE_2, available_after=-5)

    # Partial results
    count = queue.num_available_jobs_between(now - timedelta(seconds=8), now, ["abc"])
    assert count == 1

    # All results
    count = queue.num_available_jobs_between(now - timedelta(seconds=20), now, ["/abc"])
    assert count == 2

    # No results
    count = queue.num_available_jobs_between(now, now, "abc")
    assert count == 0


def test_incomplete(queue, transaction_factory):
    """incomplete() with an elapsed retry delay re-queues the same item."""
    # Add an item.
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)

    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 1

    # Retrieve it.
    item = queue.get()
    assert item is not None
    assert queue._currently_processing

    # Mark it as incomplete.
    queue.incomplete(item, retry_after=-1)
    assert not queue._currently_processing

    # Retrieve again to ensure it is once again available.
    same_item = queue.get()
    assert same_item is not None
    assert queue._currently_processing

    assert item.id == same_item.id


def test_complete(queue, transaction_factory):
    """complete() removes the claimed item and clears the processing flag."""
    # Add an item.
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)

    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 1

    # Retrieve it.
    item = queue.get()
    assert item is not None
    assert queue._currently_processing

    # Mark it as complete.
    queue.complete(item)
    assert not queue._currently_processing


def test_cancel(queue, transaction_factory):
    """cancel() drops an item by id; cancelling twice fails the second time."""
    # Add an item.
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_2, available_after=-5)

    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 2

    # Retrieve it.
    item = queue.get()
    assert item is not None

    # Make sure we can cancel it.
    assert queue.cancel(item.id)

    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 1

    # Make sure it is gone.
    assert not queue.cancel(item.id)


def test_deleted_namespaced_items(queue, transaction_factory):
    """delete_namespaced_items removes only the targeted namespace."""
    queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, has_namespace=True))
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
    queue.put(["somenamespace", "abc", "ghi"], TEST_MESSAGE_2, available_after=-5)
    queue.put(["anothernamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)

    # Ensure we have 2 items under `somenamespace` and 1 item under `anothernamespace`.
    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 2

    count = queue.num_available_jobs_between(
        now - timedelta(seconds=60), now, ["/anothernamespace"]
    )
    assert count == 1

    # Delete all `somenamespace` items.
    queue.delete_namespaced_items("somenamespace")

    # Check the updated counts.
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 0

    count = queue.num_available_jobs_between(
        now - timedelta(seconds=60), now, ["/anothernamespace"]
    )
    assert count == 1

    # Delete all `anothernamespace` items.
    queue.delete_namespaced_items("anothernamespace")

    # Check the updated counts.
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 0

    count = queue.num_available_jobs_between(
        now - timedelta(seconds=60), now, ["/anothernamespace"]
    )
    assert count == 0
15,240
5,134
__version__ = '1.0.19'
24
15
import random


def shuffle(cards):
    """Shuffle `cards` in place using an unbiased Fisher-Yates shuffle.

    Fix: the original drew the swap index from the whole list on every
    iteration (`randrange(0, len(cards))`), which is the classic biased
    naive shuffle; Fisher-Yates draws from the unshuffled suffix instead.
    """
    for i in range(len(cards)):
        randindex = random.randrange(i, len(cards))
        cards[randindex], cards[i] = cards[i], cards[randindex]


def run():
    """Play one interactive round of simplified blackjack on stdin/stdout.

    Fixes relative to the original:
    - Drawing a card added `i` (a stale loop variable from the initial
      scoring loop) to the score instead of the newly drawn card.
    - Typo in the prompt ("andother" -> "another").
    """
    # Four suits of 2..10 (face cards as 10) plus aces.
    stack = [2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 'A'] * 4
    shuffle(stack)

    # NOTE(review): dealing decrements by 2, so every other card in the
    # stack is skipped; preserved as-is since it doesn't affect fairness
    # of a shuffled deck — confirm whether single-step dealing was meant.
    stack_top = 51
    dealer_cards = []
    your_cards = []

    dealer_cards.append(stack[stack_top])
    stack_top -= 2
    your_cards.append(stack[stack_top])
    stack_top -= 2
    dealer_cards.append(stack[stack_top])
    stack_top -= 2
    your_cards.append(stack[stack_top])
    stack_top -= 2

    print("Dealers cards are " + str(dealer_cards[0]) + " and ?")

    done = False
    ace_value = 11
    ace_set = 'A' in your_cards
    score = 0
    dealers_score = 0

    # Initial scoring: aces count as 11 until a bust forces them to 1.
    for card in your_cards:
        score += card if card != 'A' else ace_value
    for card in dealer_cards:
        dealers_score += card if card != 'A' else ace_value

    while not done:
        print("Your score is " + str(score))
        done = input("Do you want another card? (Y/N) ").lower() == 'n'
        if not done:
            new_card = stack[stack_top]
            stack_top -= 2
            your_cards.append(new_card)
            # Fix: score the card just drawn, not the stale loop variable.
            score += new_card if new_card != 'A' else ace_value
            if new_card == 'A' and not ace_set:
                ace_set = True
                if score > 21:
                    # Demote the ace from 11 to 1 to avoid an instant bust.
                    score -= 10
                    ace_value = 1
            if score >= 21:
                done = True

    if score > 21:
        print("You got " + str(score))
        print("You lost!")
    else:
        print("Dealers score is: " + str(dealers_score))
        print("You " + ("won!" if score >= dealers_score else "lost!"))
1,432
653
#! /usr/bin/env python
# Copyright (c) 2017, Cuichaowen. All rights reserved.
# -*- coding: utf-8 -*-

# ops helper dictionary


class Dictionary(object):
    """
    Dictionary for op param which needs to be combined.

    Keyword arguments passed to set_attr become entries of the dictionary;
    a dict-valued argument is flattened one level (its inner keys are
    merged in and the outer key itself is dropped).
    """

    def __init__(self):
        self.__dict__ = {}

    def set_attr(self, **kwargs):
        """Merge kwargs into this dictionary (one-level flatten of dict
        values) and return self for chaining."""
        for key in kwargs.keys():
            # Fix: use isinstance instead of `type(x) == type(dict())`.
            if isinstance(kwargs[key], dict):
                for key_inner in kwargs[key].keys():
                    self.__dict__[key_inner] = kwargs[key][key_inner]
            else:
                self.__dict__[key] = kwargs[key]
        return self

    def __call__(self):
        """Return a shallow copy of the accumulated param dictionary."""
        # Idiom fix: dict(...) copies; the explicit comprehension was
        # equivalent but noisier.
        return dict(self.__dict__)


########### Object track and detection helper (for adu(caffe layer type)) Op io define #############
# The values below are *type placeholders* (bool(), list(), int(), ...):
# they declare each parameter's expected type with its zero value.

# NMSSSDParameter
nms_param = Dictionary().set_attr(need_nms=bool(),
                                  overlap_ratio=list(),
                                  top_n=list(),
                                  add_score=bool(),
                                  max_candidate_n=list(),
                                  use_soft_nms=list(),
                                  nms_among_classes=bool(),
                                  voting=list(),
                                  vote_iou=list(),
                                  nms_gpu_max_n_per_time=int())

# BBoxRegParameter
bbox_reg_param = Dictionary().set_attr(bbox_mean=list(),
                                       bbox_std=list())

# GenerateAnchorParameter
gen_anchor_param = Dictionary().set_attr(base_size=float(),
                                         ratios=list(),
                                         scales=list(),
                                         anchor_width=list(),
                                         anchor_height=list(),
                                         anchor_x1=list(),
                                         anchor_y1=list(),
                                         anchor_x2=list(),
                                         anchor_y2=list(),
                                         zero_anchor_center=bool())

# KPTSParameter
kpts_param = Dictionary().set_attr(kpts_exist_bottom_idx=int(),
                                   kpts_reg_bottom_idx=int(),
                                   kpts_reg_as_classify=bool(),
                                   kpts_classify_width=int(),
                                   kpts_classify_height=int(),
                                   kpts_reg_norm_idx_st=int(),
                                   kpts_st_for_each_class=list(),
                                   kpts_ed_for_each_class=list(),
                                   kpts_classify_pad_ratio=float())

# ATRSParameter
# enum NormType {
#     NONE,
#     WIDTH,
#     HEIGHT,
#     WIDTH_LOG,
#     HEIGHT_LOG
# }
atrs_param = Dictionary().set_attr(atrs_reg_bottom_idx=int(),
                                   atrs_reg_norm_idx_st=int(),
                                   atrs_norm_type=str())

# FTRSParameter
ftrs_param = Dictionary().set_attr(ftrs_bottom_idx=int())

# SPMPParameter
spmp_param = Dictionary().set_attr(spmp_bottom_idx=int(),
                                   spmp_class_aware=list(),
                                   spmp_label_width=list(),
                                   spmp_label_height=list(),
                                   spmp_pad_ratio=list())

# Cam3dParameter
cam3d_param = Dictionary().set_attr(cam3d_bottom_idx=int())

# DetectionOutputSSDParameter
# enum MIN_SIZE_MODE {
#     HEIGHT_AND_WIDTH,
#     HEIGHT_OR_WIDTH
# }
detection_output_ssd_param = Dictionary().set_attr(nms=nms_param(),
                                                   threshold=list(),
                                                   channel_per_scale=int(),
                                                   class_name_list=str(),
                                                   num_class=int(),
                                                   refine_out_of_map_bbox=bool(),
                                                   class_indexes=list(),
                                                   heat_map_a=list(),
                                                   heat_map_b=list(),
                                                   threshold_objectness=float(),
                                                   proposal_min_sqrt_area=list(),
                                                   proposal_max_sqrt_area=list(),
                                                   bg_as_one_of_softmax=bool(),
                                                   use_target_type_rcnn=bool(),
                                                   im_width=float(),
                                                   im_height=float(),
                                                   rpn_proposal_output_score=bool(),
                                                   regress_agnostic=bool(),
                                                   gen_anchor=gen_anchor_param(),
                                                   allow_border=float(),
                                                   allow_border_ratio=float(),
                                                   bbox_size_add_one=bool(),
                                                   read_width_scale=float(),
                                                   read_height_scale=float(),
                                                   read_height_offset=int(),
                                                   min_size_h=float(),
                                                   min_size_w=float(),
                                                   min_size_mode="HEIGHT_AND_WIDTH",
                                                   kpts=kpts_param(),
                                                   atrs=atrs_param(),
                                                   ftrs=ftrs_param(),
                                                   spmp=spmp_param(),
                                                   cam3d=cam3d_param())

# DFMBPSROIPoolingParameter
dfmb_psroi_pooling_param = Dictionary().set_attr(heat_map_a=float(),
                                                 heat_map_b=float(),
                                                 pad_ratio=float(),
                                                 output_dim=int(),
                                                 trans_std=float(),
                                                 sample_per_part=int(),
                                                 group_height=int(),
                                                 group_width=int(),
                                                 pooled_height=int(),
                                                 pooled_width=int(),
                                                 part_height=int(),
                                                 part_width=int())

# ProposalImgScaleToCamCoordsParameter
#
# enum NormType {
#     HEIGHT,
#     HEIGHT_LOG
# }
#
# enum OrienType {
#     PI,
#     PI2
# }
proposal_img_scale_to_cam_coords_param = Dictionary().set_attr(
    num_class=int(),
    sub_class_num_class=list(),
    sub_class_bottom_idx=list(),
    prj_h_norm_type=str(),
    has_size3d_and_orien3d=bool(),
    orien_type=str(),
    cls_ids_zero_size3d_w=list(),
    cls_ids_zero_size3d_l=list(),
    cls_ids_zero_orien3d=list(),
    cmp_pts_corner_3d=bool(),
    cmp_pts_corner_2d=bool(),
    ctr_2d_means=list(),
    ctr_2d_stds=list(),
    prj_h_means=list(),
    prj_h_stds=list(),
    real_h_means=list(),
    real_h_stds=list(),
    real_w_means=list(),
    real_w_stds=list(),
    real_l_means=list(),
    real_l_stds=list(),
    sin_means=list(),
    sin_stds=list(),
    cos_means=list(),
    cos_stds=list(),
    cam_info_idx_st_in_im_info=int(),
    im_width_scale=float(),
    im_height_scale=float(),
    cords_offset_x=float(),
    cords_offset_y=float(),
    bbox_size_add_one=bool(),
    rotate_coords_by_pitch=bool(),
    #refine_coords_by_bbox=bool(),
    #refine_min_dist=float(),
    #refine_dist_for_height_ratio_one=float(),
    #max_3d2d_height_ratio_for_min_dist=float(),
    with_trunc_ratio=bool(),
    regress_ph_rh_as_whole=bool(),
    real_h_means_as_whole=list(),
    real_h_stds_as_whole=list())

# RPNProposalSSD parameter
RPNProposalSSD_param = Dictionary().set_attr(detection_output_ssd=detection_output_ssd_param(),
                                             bbox_reg=bbox_reg_param())
11,017
2,548