# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 12:11:02 2018

@author: User
"""


def forever15(n):
    # (((n * 3) + 45) * 2) // 6 simplifies to n + 15, so this always prints 15.
    final = (((n * 3) + 45) * 2) // 6 - n
    print(final)


forever15(1550)


def findmin(a, b, c):
    first = a - b
    second = b - c
    third = c - a
    # Use the pairwise differences to return the smallest of the three values.
    if first <= 0 and third >= 0:
        return a
    if second <= 0:
        return b
    return c

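A quick check of the algebra above: a minimal sketch verifying that the forever15 expression collapses to the constant 15 for any integer input.

# 6n + 90 is always divisible by 6, so the floor division is exact.
for n in (0, 1, -7, 1550, 10**6):
    assert (((n * 3) + 45) * 2) // 6 - n == 15
print("forever15 always yields 15")
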
from typing import Generator, Generic, Optional, TypeVar

_T = TypeVar("_T")


class Ring(Generic[_T]):
    def __init__(self, value: _T, next_: "Ring[_T]"):
        self.value = value
        self.next = next_

    def copy(self) -> "Ring[_T]":
        result = None
        run = self
        first = True
        while first or run != self:
            first = False
            result = Ring.create(run.value, result)
            run = run.next
        if result is None:
            raise Exception  # Can actually never happen
        return result.next

    def __str__(self) -> str:
        result = str(self.value)
        run = self.next
        while run != self:
            result += str(run.value)
            run = run.next
        return result

    def __repr__(self) -> str:
        return f"Ring({self.value})"

    def __contains__(self, item: _T) -> bool:
        return self.find(item) is not None

    def __iter__(self) -> Generator["Ring[_T]", None, None]:
        run = self
        while run.next != self:
            yield run
            run = run.next
        yield run

    def __next__(self) -> "Ring[_T]":
        return self.next

    def __len__(self) -> int:
        run = self
        count = 0
        while run.next != self:
            count += 1
            run = run.next
        return count + 1

    def __getitem__(self, item: _T) -> "Ring[_T]":
        result = self.find(item)
        if result is None:
            raise KeyError(f"{item} is not in this ring")
        return result

    def append(self, value: _T) -> "Ring[_T]":
        ring = Ring[_T](value, self.next)
        self.next = ring
        return ring

    def find(self, value: _T) -> Optional["Ring[_T]"]:
        run = self
        while run.value != value and run.next != self:
            run = run.next
        if run.value == value:
            return run
        return None

    def prev(self) -> "Ring[_T]":
        run = self
        while run.next != self:
            run = run.next
        return run

    @staticmethod
    def create(value: _T, prev: Optional["Ring[_T]"]) -> "Ring[_T]":
        if prev is not None:
            return prev.append(value)

        # Mini ring that points to itself
        ring: Ring[_T] = Ring[_T](value, None)  # type: ignore
        ring.next = ring
        return ring

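A short usage sketch for the Ring class above, building a three-node ring with the create/append helpers and exercising iteration, length, and lookup:

# Build the ring 1 -> 2 -> 3 -> (back to 1).
ring = Ring.create(1, None)
ring.append(3)
ring.append(2)  # append inserts right after the node, so the order is 1, 2, 3

assert len(ring) == 3
assert str(ring) == "123"
assert 2 in ring
assert [node.value for node in ring] == [1, 2, 3]
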
# Generated by Django 3.2.3 on 2021-05-31 04:23

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0005_alter_studentprofile_student'),
    ]

    operations = [
        migrations.AddField(
            model_name='studentprofile',
            name='name',
            field=models.CharField(default='', max_length=200),
        ),
    ]

from typing import List


class Solution:
    def maxProduct(self, nums: List[int]) -> int:
        ans = nums[0]
        prevMin = nums[0]
        prevMax = nums[0]
        for i in range(1, len(nums)):
            mini = prevMin * nums[i]
            maxi = prevMax * nums[i]
            # Track both extremes: a negative number can turn the smallest
            # running product into the largest one.
            prevMin = min(nums[i], mini, maxi)
            prevMax = max(nums[i], mini, maxi)
            ans = max(ans, prevMax)
        return ans

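A minimal check of the max-product logic, using the classic cases where a negative value flips the running minimum into the answer:

# [2, 3, -2, 4]: best subarray is [2, 3] with product 6.
# [-2, 3, -4]: the two negatives multiply out to 24.
solver = Solution()
assert solver.maxProduct([2, 3, -2, 4]) == 6
assert solver.maxProduct([-2, 3, -4]) == 24
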
from __future__ import print_function

import os.path
import time
import sys
import six
import platform

from openpype.api import Logger
from openpype.api import get_system_settings
from .abstract_provider import AbstractProvider
from ..utils import time_function, ResumableError

log = Logger().get_logger("SyncServer")

try:
    from googleapiclient.discovery import build
    import google.oauth2.service_account as service_account
    from googleapiclient import errors
    from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
except (ImportError, SyntaxError):
    if six.PY3:
        six.reraise(*sys.exc_info())

    # handle imports from Python 2 hosts - in those only basic methods are used
    log.warning("Import failed, imported from Python 2, operations will fail.")

SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly',
          'https://www.googleapis.com/auth/drive.file',
          'https://www.googleapis.com/auth/drive.readonly']  # for write|delete


class GDriveHandler(AbstractProvider):
    """
        Implementation of Google Drive API.
        As the GD API doesn't have a real folder structure, a 'tree' in-memory
        structure is built in the constructor to map folder paths to the
        folder ids used by the API. Building this tree might be expensive and
        slow, so it should run only when necessary. It is currently created
        lazily, on the first call that needs it.

        Configuration for the provider is in
        'settings/defaults/project_settings/global.json'

        Settings could be overwritten per project.

        Example of config:
          "gdrive": {              - site name
            "provider": "gdrive",  - type of provider, label must be registered
            "credentials_url": "/my_secret_folder/credentials.json",
            "root": {              - could be "root": "/My Drive" for single root
                "root_one": "/My Drive",
                "root_two": "/My Drive/different_folder"
            }
          }
    """
    CODE = 'gdrive'
    LABEL = 'Google Drive'

    FOLDER_STR = 'application/vnd.google-apps.folder'
    MY_DRIVE_STR = 'My Drive'  # name of root folder of regular Google drive

    CHUNK_SIZE = 2097152  # must be divisible by 256! used for upload chunks

    def __init__(self, project_name, site_name, tree=None, presets=None):
        self.active = False
        self.project_name = project_name
        self.site_name = site_name
        self.service = None
        self.root = None

        self.presets = presets
        if not self.presets:
            log.info("Sync Server: There are no presets for {}.".
                     format(site_name))
            return

        provider_presets = self.presets.get(self.CODE)
        if not provider_presets:
            msg = "Sync Server: No provider presets for {}".format(self.CODE)
            log.info(msg)
            return

        cred_path = self.presets[self.CODE].get("credentials_url", {}).\
            get(platform.system().lower()) or ''
        if not os.path.exists(cred_path):
            msg = "Sync Server: No credentials for gdrive provider " + \
                  "for '{}' on path '{}'!".format(site_name, cred_path)
            log.info(msg)
            return

        self.service = self._get_gd_service(cred_path)

        self._tree = tree
        self.active = True

    def is_active(self):
        """
            Returns True if provider is activated, eg. has working credentials.
            Returns:
                (boolean)
        """
        return self.service is not None

    @classmethod
    def get_system_settings_schema(cls):
        """
            Returns dict for editable properties on system settings level

            Returns:
                (list) of dict
        """
        return []

    @classmethod
    def get_project_settings_schema(cls):
        """
            Returns dict for editable properties on project settings level

            Returns:
                (list) of dict
        """
        # {platform} tells that the value is multiplatform and only the
        # specific OS variant should be returned
        editable = [
            # credentials could be overridden on Project or User level
            {
                'key': "credentials_url",
                'label': "Credentials url",
                'type': 'text'
            },
            # roots could be overridden only on Project level, User cannot
            {
                'key': "roots",
                'label': "Roots",
                'type': 'dict'
            }
        ]
        return editable

    @classmethod
    def get_local_settings_schema(cls):
        """
            Returns dict for editable properties on local settings level

            Returns:
                (dict)
        """
        editable = [
            # credentials could be overridden on Project or User level
            {
                'key': "credentials_url",
                'label': "Credentials url",
                'type': 'text',
                'namespace': '{project_settings}/global/sync_server/sites/{site}/credentials_url/{platform}'  # noqa: E501
            }
        ]
        return editable

    def get_roots_config(self, anatomy=None):
        """
            Returns root values for path resolving

            Use only Settings as GDrive cannot be modified by Local Settings

            Returns:
                (dict) - {"root": {"root": "/My Drive"}}
                OR {"root": {"root_ONE": "value", "root_TWO": "value"}}
            Format is important for usage of python's format ** approach
        """
        # GDrive roots cannot be locally overridden
        return self.presets['root']

    def get_tree(self):
        """
            Building of the folder tree could be potentially expensive,
            the constructor provides an argument that could inject a
            previously created tree.
            Tree structure must be handled in a thread safe fashion!
            Returns:
                (dictionary) - url to id mapping
        """
        if not self._tree:
            self._tree = self._build_tree(self.list_folders())
        return self._tree

    def create_folder(self, path):
        """
            Create all nonexistent folders and subfolders in 'path'.
            Updates self._tree structure with new paths

            Args:
                path (string): absolute path, starts with GDrive root,
                    without filename
            Returns:
                (string) folder id of lowest subfolder from 'path'
        """
        folder_id = self.folder_path_exists(path)
        if folder_id:
            return folder_id
        parts = path.split('/')
        folders_to_create = []

        while parts:
            folders_to_create.append(parts.pop())
            path = '/'.join(parts)
            path = path.strip()
            folder_id = self.folder_path_exists(path)  # lowest common path
            if folder_id:
                while folders_to_create:
                    new_folder_name = folders_to_create.pop()
                    folder_metadata = {
                        'name': new_folder_name,
                        'mimeType': 'application/vnd.google-apps.folder',
                        'parents': [folder_id]
                    }
                    folder = self.service.files().create(
                        body=folder_metadata,
                        supportsAllDrives=True,
                        fields='id').execute()
                    folder_id = folder["id"]

                    new_path_key = path + '/' + new_folder_name
                    self.get_tree()[new_path_key] = {"id": folder_id}

                    path = new_path_key
                return folder_id

    def upload_file(self, source_path, path,
                    server, collection, file, representation, site,
                    overwrite=False):
        """
            Uploads single file from 'source_path' to destination 'path'.
            It creates all folders on the path if they do not exist.

            Args:
                source_path (string):
                path (string): absolute path with or without name of the file
                overwrite (boolean): replace existing file

                arguments for saving progress:
                server (SyncServer): server instance to call update_db on
                collection (str): name of collection
                file (dict): info about uploaded file (matches structure from db)
                representation (dict): complete repre containing 'file'
                site (str): site name
            Returns:
                (string) file_id of created/modified file,
                    throws FileExistsError, FileNotFoundError exceptions
        """
        if not os.path.isfile(source_path):
            raise FileNotFoundError("Source file {} doesn't exist."
                                    .format(source_path))

        root, ext = os.path.splitext(path)
        if ext:
            # full path
            target_name = os.path.basename(path)
            path = os.path.dirname(path)
        else:
            target_name = os.path.basename(source_path)
        target_file = self.file_path_exists(path + "/" + target_name)
        if target_file and not overwrite:
            raise FileExistsError("File already exists, "
                                  "use 'overwrite' argument")

        folder_id = self.folder_path_exists(path)
        if not folder_id:
            raise NotADirectoryError("Folder {} doesn't exist".format(path))

        file_metadata = {
            'name': target_name
        }
        media = MediaFileUpload(source_path,
                                mimetype='application/octet-stream',
                                chunksize=self.CHUNK_SIZE,
                                resumable=True)

        try:
            if not target_file:
                # update doesn't like parent
                file_metadata['parents'] = [folder_id]

                request = self.service.files().create(body=file_metadata,
                                                      supportsAllDrives=True,
                                                      media_body=media,
                                                      fields='id')
            else:
                request = self.service.files().update(fileId=target_file["id"],
                                                      body=file_metadata,
                                                      supportsAllDrives=True,
                                                      media_body=media,
                                                      fields='id')

            media.stream()
            log.debug("Start Upload! {}".format(source_path))
            last_tick = status = response = None
            status_val = 0
            while response is None:
                if server.is_representation_paused(representation['_id'],
                                                   check_parents=True,
                                                   project_name=collection):
                    raise ValueError("Paused during process, please redo.")
                if status:
                    status_val = float(status.progress())
                if not last_tick or \
                        time.time() - last_tick >= server.LOG_PROGRESS_SEC:
                    last_tick = time.time()
                    log.debug("Uploaded %d%%." % int(status_val * 100))
                    server.update_db(collection=collection,
                                     new_file_id=None,
                                     file=file,
                                     representation=representation,
                                     site=site,
                                     progress=status_val
                                     )
                status, response = request.next_chunk()

        except errors.HttpError as ex:
            if ex.resp['status'] == '404':
                return False
            if ex.resp['status'] == '403':
                # real permission issue
                if 'has not granted' in ex._get_reason().strip():
                    raise PermissionError(ex._get_reason().strip())

                log.warning("Forbidden received, hit quota. "
                            "Injecting 60s delay.")
                time.sleep(60)
                return False
            raise
        return response['id']

    def download_file(self, source_path, local_path,
                      server, collection, file, representation, site,
                      overwrite=False):
        """
            Downloads single file from 'source_path' (remote) to 'local_path'.
            It creates all folders on the local_path if they do not exist.
            By default an existing file on 'local_path' will trigger an
            exception

            Args:
                source_path (string): absolute path on provider
                local_path (string): absolute path with or without name of
                    the file
                overwrite (boolean): replace existing file

                arguments for saving progress:
                server (SyncServer): server instance to call update_db on
                collection (str): name of collection
                file (dict): info about uploaded file (matches structure from db)
                representation (dict): complete repre containing 'file'
                site (str): site name
            Returns:
                (string) file_id of created/modified file,
                    throws FileExistsError, FileNotFoundError exceptions
        """
        remote_file = self.file_path_exists(source_path)
        if not remote_file:
            raise FileNotFoundError("Source file {} doesn't exist."
                                    .format(source_path))

        root, ext = os.path.splitext(local_path)
        if ext:
            # full path with file name
            target_name = os.path.basename(local_path)
            local_path = os.path.dirname(local_path)
        else:  # just folder, get file name from source
            target_name = os.path.basename(source_path)

        local_file = os.path.isfile(local_path + "/" + target_name)

        if local_file and not overwrite:
            raise FileExistsError("File already exists, "
                                  "use 'overwrite' argument")

        request = self.service.files().get_media(fileId=remote_file["id"],
                                                 supportsAllDrives=True)

        with open(local_path + "/" + target_name, "wb") as fh:
            downloader = MediaIoBaseDownload(fh, request)
            last_tick = status = response = None
            status_val = 0
            while response is None:
                if server.is_representation_paused(representation['_id'],
                                                   check_parents=True,
                                                   project_name=collection):
                    raise ValueError("Paused during process, please redo.")
                if status:
                    status_val = float(status.progress())
                if not last_tick or \
                        time.time() - last_tick >= server.LOG_PROGRESS_SEC:
                    last_tick = time.time()
                    log.debug("Downloaded %d%%." % int(status_val * 100))
                    server.update_db(collection=collection,
                                     new_file_id=None,
                                     file=file,
                                     representation=representation,
                                     site=site,
                                     progress=status_val
                                     )
                status, response = downloader.next_chunk()

        return target_name

    def delete_folder(self, path, force=False):
        """
            Deletes folder on GDrive. Checks if the folder contains any files
            or subfolders. In that case it raises an error, which could be
            overridden by the 'force' argument; then it deletes the folder on
            'path' and all its children.

            Args:
                path (string): absolute path on GDrive
                force (boolean): delete even if children in folder

            Returns:
                None
        """
        folder_id = self.folder_path_exists(path)
        if not folder_id:
            raise ValueError("Not valid folder path {}".format(path))

        fields = 'nextPageToken, files(id, name, parents)'
        q = self._handle_q("'{}' in parents ".format(folder_id))
        response = self.service.files().list(
            q=q,
            corpora="allDrives",
            includeItemsFromAllDrives=True,
            supportsAllDrives=True,
            pageSize='1',
            fields=fields).execute()
        children = response.get('files', [])
        if children and not force:
            raise ValueError("Folder {} is not empty, use 'force'".
                             format(path))

        self.service.files().delete(fileId=folder_id,
                                    supportsAllDrives=True).execute()

    def delete_file(self, path):
        """
            Deletes file from 'path'. Expects path to specific file.

            Args:
                path: absolute path to particular file

            Returns:
                None
        """
        file = self.file_path_exists(path)
        if not file:
            raise ValueError("File {} doesn't exist".format(path))
        self.service.files().delete(fileId=file["id"],
                                    supportsAllDrives=True).execute()

    def list_folder(self, folder_path):
        """
            List all files and subfolders of particular path non-recursively.
            Args:
                folder_path (string): absolute path on provider
            Returns:
                (list)
        """
        pass

    @time_function
    def list_folders(self):
        """
            Lists all folders in GDrive.
            Used to build the in-memory path-to-folder-ids model.

            Returns:
                (list) of dictionaries('id', 'name', [parents])
        """
        folders = []
        page_token = None
        fields = 'nextPageToken, files(id, name, parents)'
        while True:
            q = self._handle_q("mimeType='application/vnd.google-apps.folder'")
            response = self.service.files().list(
                q=q,
                pageSize=1000,
                corpora="allDrives",
                includeItemsFromAllDrives=True,
                supportsAllDrives=True,
                fields=fields,
                pageToken=page_token).execute()
            folders.extend(response.get('files', []))
            page_token = response.get('nextPageToken', None)
            if page_token is None:
                break

        return folders

    def list_files(self):
        """
            Lists all files in GDrive.
            Runs a loop through possibly multiple pages. The result could be
            large; if that becomes a problem, change it to a generator.

            Returns:
                (list) of dictionaries('id', 'name', [parents])
        """
        files = []
        page_token = None
        fields = 'nextPageToken, files(id, name, parents)'
        while True:
            q = self._handle_q("")
            response = self.service.files().list(
                q=q,
                corpora="allDrives",
                includeItemsFromAllDrives=True,
                supportsAllDrives=True,
                fields=fields,
                pageToken=page_token).execute()
            files.extend(response.get('files', []))
            page_token = response.get('nextPageToken', None)
            if page_token is None:
                break

        return files

    def folder_path_exists(self, file_path):
        """
            Checks if the path from 'file_path' exists. If so, returns its
            folder id.

            Args:
                file_path (string): gdrive path with / as a separator
            Returns:
                (string) folder id or False
        """
        if not file_path:
            return False

        root, ext = os.path.splitext(file_path)
        if not ext:
            file_path += '/'

        dir_path = os.path.dirname(file_path)

        path = self.get_tree().get(dir_path, None)
        if path:
            return path["id"]

        return False

    def file_path_exists(self, file_path):
        """
            Checks if 'file_path' exists on GDrive

            Args:
                file_path (string): separated by '/', from root, with file
                    name
            Returns:
                (dictionary|boolean) file metadata | False if not found
        """
        folder_id = self.folder_path_exists(file_path)
        if folder_id:
            return self.file_exists(os.path.basename(file_path), folder_id)
        return False

    def file_exists(self, file_name, folder_id):
        """
            Checks if 'file_name' exists in 'folder_id'

            Args:
                file_name (string):
                folder_id (int): google drive folder id
            Returns:
                (dictionary|boolean) file metadata, False if not found
        """
        q = self._handle_q("name = '{}' and '{}' in parents"
                           .format(file_name, folder_id))
        response = self.service.files().list(
            q=q,
            corpora="allDrives",
            includeItemsFromAllDrives=True,
            supportsAllDrives=True,
            fields='nextPageToken, files(id, name, parents, '
                   'mimeType, modifiedTime,size,md5Checksum)').execute()
        if len(response.get('files')) > 1:
            raise ValueError("Too many files returned for {} in {}"
                             .format(file_name, folder_id))

        file = response.get('files', [])
        if not file:
            return False
        return file[0]

    @classmethod
    def get_presets(cls):
        """
            Get presets for this provider
            Returns:
                (dictionary) of configured sites
        """
        provider_presets = None
        try:
            provider_presets = (
                get_system_settings()["modules"]
                                     ["sync_server"]
                                     ["providers"]
                                     ["gdrive"]
            )
        except KeyError:
            log.info(("Sync Server: There are no presets for Gdrive " +
                      "provider.").
                     format(str(provider_presets)))
            return
        return provider_presets

    def _get_gd_service(self, credentials_path):
        """
            Authorize client with 'credentials.json', uses service account.
            Service account needs to have the target folder shared with it.
            Produces service that communicates with GDrive API.

            Returns:
                None if authorization fails, the service object otherwise
        """
        service = None
        try:
            creds = service_account.Credentials.from_service_account_file(
                credentials_path, scopes=SCOPES)
            service = build('drive', 'v3',
                            credentials=creds, cache_discovery=False)
        except Exception:
            log.error("Connection failed, " +
                      "check '{}' credentials file".format(credentials_path),
                      exc_info=True)

        return service

    def _prepare_root_info(self):
        """
            Prepare info about roots and their folder ids from 'presets'.
            Configuration might be for single or multiroot projects.
            Regular My Drive and Shared drives are implemented; their root
            folder ids need to be queried in slightly different ways.

            Returns:
                (dicts) of dicts where root folders are keys
            throws ResumableError in case of errors.HttpError
        """
        roots = {}
        config_roots = self.get_roots_config()
        try:
            for path in config_roots.values():
                if self.MY_DRIVE_STR in path:
                    roots[self.MY_DRIVE_STR] = self.service.files()\
                                                   .get(fileId='root')\
                                                   .execute()
                else:
                    shared_drives = []
                    page_token = None

                    while True:
                        response = self.service.drives().list(
                            pageSize=100,
                            pageToken=page_token).execute()
                        shared_drives.extend(response.get('drives', []))
                        page_token = response.get('nextPageToken', None)
                        if page_token is None:
                            break

                    folders = path.split('/')
                    if len(folders) < 2:
                        raise ValueError("Wrong root folder definition {}".
                                         format(path))

                    for shared_drive in shared_drives:
                        if folders[1] in shared_drive["name"]:
                            roots[shared_drive["name"]] = {
                                "name": shared_drive["name"],
                                "id": shared_drive["id"]}

            if self.MY_DRIVE_STR not in roots:  # add My Drive always
                roots[self.MY_DRIVE_STR] = self.service.files() \
                                               .get(fileId='root').execute()
        except errors.HttpError:
            log.warning("HttpError in sync loop, "
                        "trying next loop",
                        exc_info=True)
            raise ResumableError

        return roots

    @time_function
    def _build_tree(self, folders):
        """
            Create in-memory structure resolving paths to folder ids, as
            recursive querying might be slower.
            Initialized at class-initialization time. Maybe it should be
            persisted.
            Tree is a structure of path to id:
                '/ROOT': {'id': '1234567'}
                '/ROOT/PROJECT_FOLDER': {'id': '222222'}
                '/ROOT/PROJECT_FOLDER/Assets': {'id': '3434545'}

            Args:
                folders (list): list of dictionaries with folder metadata
            Returns:
                (dictionary) path as a key, folder id as a value
        """
        log.debug("build_tree len {}".format(len(folders)))
        if not self.root:  # build only when necessary, could be expensive
            self.root = self._prepare_root_info()

        root_ids = []
        default_root_id = None
        tree = {}
        ending_by = {}
        for root_name, root in self.root.items():  # might be multiple roots
            if root["id"] not in root_ids:
                tree["/" + root_name] = {"id": root["id"]}
                ending_by[root["id"]] = "/" + root_name
                root_ids.append(root["id"])

                if self.MY_DRIVE_STR == root_name:
                    default_root_id = root["id"]

        no_parents_yet = {}
        while folders:
            folder = folders.pop(0)
            parents = folder.get("parents", [])
            # weird cases, shared folders, etc, parent under root
            if not parents:
                parent = default_root_id
            else:
                parent = parents[0]

            if folder["id"] in root_ids:  # do not process root
                continue

            if parent in ending_by:
                path_key = ending_by[parent] + "/" + folder["name"]
                ending_by[folder["id"]] = path_key
                tree[path_key] = {"id": folder["id"]}
            else:
                no_parents_yet.setdefault(parent, []).append(
                    (folder["id"], folder["name"]))

        loop_cnt = 0
        # break if looped more than X times - safety against infinite loop
        while no_parents_yet and loop_cnt < 20:
            keys = list(no_parents_yet.keys())
            for parent in keys:
                if parent in ending_by.keys():
                    subfolders = no_parents_yet.pop(parent)
                    for folder_id, folder_name in subfolders:
                        path_key = ending_by[parent] + "/" + folder_name
                        ending_by[folder_id] = path_key
                        tree[path_key] = {"id": folder_id}
            loop_cnt += 1

        if len(no_parents_yet) > 0:
            log.debug("Some folder paths are not resolved {}".
                      format(no_parents_yet))
            log.debug("Remove deleted folders from trash.")

        return tree

    def _get_folder_metadata(self, path):
        """
            Get info about folder with 'path'
            Args:
                path (string):

            Returns:
                (dictionary) with metadata or raises ValueError
        """
        try:
            return self.get_tree()[path]
        except Exception:
            raise ValueError("Unknown folder path {}".format(path))

    def _handle_q(self, q, trashed=False):
        """
            API list calls contain trashed and hidden files/folders by
            default. Usually we don't want those, so the filter must be added
            to the query explicitly.

            Args:
                q (string): query portion
                trashed (boolean): False|True

            Returns:
                (string) - modified query
        """
        parts = [q]
        if not trashed:
            parts.append(" trashed = false ")

        return " and ".join(parts)


if __name__ == '__main__':
    gd = GDriveHandler('test_project', 'gdrive')  # placeholder names
    print(gd.root)
    print(gd.get_tree())

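A hedged usage sketch for GDriveHandler. The presets dict below mirrors the shape documented in the class docstring; the project name, site name, and credentials path are placeholder values, and a valid service-account credentials file would need to exist at that path:

# Hypothetical presets matching the documented config shape.
presets = {
    "root": {"root": "/My Drive"},
    "gdrive": {
        "provider": "gdrive",
        # keyed by platform.system().lower(), per __init__ above
        "credentials_url": {"linux": "/my_secret_folder/credentials.json"},
    },
}

handler = GDriveHandler("my_project", "gdrive", presets=presets)
if handler.is_active():
    print(handler.folder_path_exists("/My Drive/my_project"))
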
# coding: utf8
from __future__ import unicode_literals

from .stop_words import STOP_WORDS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups


class CroatianDefaults(Language.Defaults):
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: "hr"
    lex_attr_getters[NORM] = add_lookups(
        Language.Defaults.lex_attr_getters[NORM], BASE_NORMS
    )
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS)
    stop_words = STOP_WORDS


class Croatian(Language):
    lang = "hr"
    Defaults = CroatianDefaults


__all__ = ["Croatian"]

""" Data related functions and objects. This module contains several parts: - `data_loader` defines a DataLoader objects that behave similar to pandas DataFrame and are used to load data. In addition it provides DataLoader wrappers that implement various transformations on the loaded dataset. - `data_generator` defines a DataGenerator object that takes a DataLoader as input and creates batches of data from it. This submodule also defines a number of wrappers that apply transformation to the generated batches of data. - `data` file defines a number of routines to simplify data handling. """ from .data import load_data, create_data_generators, construct_data_loader __all__ = [ 'load_data', 'create_data_generators', 'construct_data_loader' ]
from .ner_labels import NERLabels
from .ner_dataset import NERDataset
from .label_mapper import LabelMapper
from .dataset_tokenizer import DatasetTokenizer

__all__ = ["NERLabels", "NERDataset", "LabelMapper", "DatasetTokenizer"]

from django.shortcuts import render, redirect
from django.contrib.auth.models import User, auth
from django.contrib import messages
from django.shortcuts import get_object_or_404
from .models import *
from .forms import *
from datetime import datetime, timedelta
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage


def index(request):
    return render(request, 'index.html')


def register(request):
    if request.method == 'POST':
        username = request.POST['username']
        email = request.POST['email']
        password = request.POST['password']
        password2 = request.POST['password2']

        if password == password2:
            if User.objects.filter(email=email).exists():
                messages.info(request, 'Email already used!')
                return redirect('register')
            elif User.objects.filter(username=username).exists():
                messages.info(request, 'Username already used!')
                return redirect('register')
            else:
                user = User.objects.create_user(username=username, email=email, password=password)
                user.save()
                return redirect('login')
        else:
            messages.info(request, "Passwords don't match")
            return redirect('register')
    else:
        return render(request, 'register.html')


def login(request):
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']

        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('/')
        else:
            messages.info(request, "User doesn't exist")
            return redirect('login')
    else:
        return render(request, 'login.html')


@login_required
def logout(request):
    auth.logout(request)
    return redirect('/')


def rooms(request):
    free_rooms = Room.objects.filter(is_reserved=False)
    p = Paginator(free_rooms, 1)
    page_num = request.GET.get('page', 1)
    try:
        page = p.page(page_num)
    except EmptyPage:
        page = p.page(1)
    return render(request, 'rooms.html', {'rooms': page})


def search_rooms(request):
    searched = request.POST['search_value']
    obj = Room.objects.filter(number=searched, is_reserved=False)
    return render(request, 'search_rooms.html', {'room': obj})


def room(request, pk):
    room = Room.objects.get(id=pk)
    comments = Comment.objects.filter(room=room)
    return render(request, 'room.html', {'room': room, 'comments': comments})


@login_required
def reservation(request, pk):
    obj = get_object_or_404(Room, id=pk)
    user = request.user
    if request.method == "POST":
        form = ReservationForm(request.POST)
        if form['arrival_date'].value() != form['departure_date'].value():
            if form.is_valid():
                res = form.save(commit=False)
                res.user = user
                obj.is_reserved = True
                res.room = obj
                obj.save()
                res.save()
                return redirect('/')
        else:
            messages.info(request, 'Arrival and departure date are equal')
    else:
        form = ReservationForm()
    return render(request, 'reservation.html', {'form': form, 'room': obj})


@login_required
def profile(request):
    user = request.user
    reservations = Reservation.objects.all().order_by('-reserve_time')
    return render(request, 'profile.html', {'reservations': reservations, 'user': user})


@login_required
def delete_reservation(request, pk):
    obj = get_object_or_404(Reservation, id=pk)
    room = obj.room
    room.is_reserved = False
    room.save()
    obj.delete()
    return redirect('/profile')


@login_required
def edit_reservation(request, pk):
    obj = get_object_or_404(Reservation, id=pk)
    room = obj.room
    form = ReservationForm(request.POST or None, instance=obj)
    if form['arrival_date'].value() != form['departure_date'].value():
        if form.is_valid():
            res = form.save(commit=False)
            res.reserve_time = datetime.now()
            res.save()
            return redirect('/profile')
    else:
        messages.info(request, 'Arrival and departure date are equal')
    return render(request, 'reservation.html', {'form': form, 'room': room})


@login_required
def add_comment(request, pk):
    obj = get_object_or_404(Room, id=pk)
    user = request.user
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form['text'].value():
            if form['rate'].value():
                if form.is_valid():
                    com = form.save(commit=False)
                    com.user = user
                    com.room = obj
                    com.save()
                    return redirect('/rooms')
            else:
                messages.info(request, 'You must rate the room!')
        else:
            messages.info(request, 'You should type something!')
    else:
        form = CommentForm()
    return render(request, 'comment.html', {'form': form})


@login_required
def show_last_month(request):
    obj = Reservation.objects.filter(departure_date__gt=datetime.now() - timedelta(days=30))
    print(datetime.now() - timedelta(30))
    return render(request, 'lastmonth.html', {'objects': obj})

from .accuracy import Accuracy, accuracy

__all__ = ['Accuracy', 'accuracy']

import sys

"""
File name: scenario_modifier
Date created: 03/03/2019
Feature: Enable the user to overwrite the scenario file.
"""
__author__ = "Alexander Kell"
__copyright__ = "Copyright 2018, Alexander Kell"
__license__ = "MIT"
__email__ = "alexander@kell.es"


def overwrite_scenario_file(scenario_file):
    sys.modules['elecsim'].scenario.scenario_data = scenario_file

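A hedged usage sketch for the helper above. It relies on the elecsim package being importable so that sys.modules['elecsim'] resolves; the scenario object below is a placeholder:

import elecsim.scenario.scenario_data  # ensures 'elecsim' is in sys.modules

my_scenario = {"carbon_price": 40}  # hypothetical modified scenario data
overwrite_scenario_file(my_scenario)
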
class User:
    """Represents a MangaDex User."""

    __slots__ = ("id", "username", "roles", "client")

    def __init__(self, data, client):
        self.id = data.get("id")
        _attrs = data.get("attributes")
        self.username = _attrs.get("username")
        self.roles = _attrs.get("roles", [])
        self.client = client

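A small sketch constructing a User from the kind of payload the class expects. The field values are made-up sample data, and client is whatever API client object the caller holds:

payload = {
    "id": "f8cc4f8a-e596-4866-a1a6-d7e2b7cfe43e",  # made-up UUID
    "attributes": {"username": "reader42", "roles": ["ROLE_MEMBER"]},
}
user = User(payload, client=None)
print(user.username, user.roles)  # reader42 ['ROLE_MEMBER']
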
# Authors: Stephane Gaiffas <stephane.gaiffas@gmail.com>
#          Ibrahim Merad <imerad7@gmail.com>
# License: BSD 3 clause

"""
This module implements the ``GMOM`` class for the geometric median-of-means
robust estimator.

``StateGMOM`` is a place-holder for the GMOM estimator containing:

    gradient: numpy.ndarray
        A numpy array of shape (n_weights,) containing gradients computed by
        the `grad` function returned by the `grad_factory` factory function.

TODO: fill in the missing fields of StateGMOM
"""

from collections import namedtuple
import numpy as np
from numba import jit

from ._base import Estimator, jit_kwargs
from .._utils import np_float


@jit(**jit_kwargs)
def gmom_njit(xs, tol=1e-4):
    # from Vardi and Zhang 2000
    n_elem, n_dim = xs.shape
    # TODO : avoid the memory allocations in this function
    y = np.zeros(n_dim)
    dists = np.zeros(n_elem)
    inv_dists = np.zeros(n_elem)

    xsy = np.zeros_like(xs)
    for i in range(n_elem):
        y += xs[i]
    y /= n_elem
    eps = 1e-10
    delta = 1
    niter = 0
    while delta > tol:
        xsy[:] = xs - y
        dists.fill(0.0)
        for j in range(n_dim):
            dists[:] += xsy[:, j] * xsy[:, j]  # np.linalg.norm(xsy, axis=1)
        for i in range(n_elem):
            dists[i] = np.sqrt(dists[i])
        # dists[:] = euclidean_numba1(xs, [y]).flatten()
        mask = dists < eps
        nmask = np.logical_not(mask)
        inv_dists[nmask] = 1 / dists[nmask]
        # print("pass2")
        inv_dists[mask] = 0
        nb_too_close = mask.sum()
        ry = np.sqrt(
            np.sum(np.dot(inv_dists, xsy) ** 2)
        )  # np.linalg.norm(np.dot(inv_dists, xsy))
        if ry == 0:
            break
        cst = nb_too_close / ry
        sum_inv_dists = np.sum(inv_dists)
        if sum_inv_dists == 0:
            raise ValueError
        y_new = (
            max(0, 1 - cst) * np.dot(inv_dists, xs) / sum_inv_dists
            + min(1, cst) * y
        )
        delta = np.sqrt(np.sum((y - y_new) ** 2))  # np.linalg.norm(y - y_new)
        y = y_new
        niter += 1
    return y, niter * (n_elem + 1)


@jit(**jit_kwargs)
def gmom_njit2(X, tol=1e-5):
    n_elem, n_dim = X.shape
    y = np.zeros(n_dim)
    for i in range(n_elem):
        y += X[i]
    y /= n_elem
    D = np.zeros((n_elem, 1))
    while True:
        D.fill(0.0)
        for i in range(n_elem):
            for j in range(n_dim):
                D[i] += (X[i, j] - y[j]) ** 2
            D[i] = np.sqrt(D[i])
        # D = cdist(X, [y])
        nonzeros = (D != 0)[:, 0]

        Dinv = 1 / D[nonzeros]
        Dinvs = np.sum(Dinv)
        W = Dinv / Dinvs
        T = np.sum(W * X[nonzeros], 0)

        num_zeros = n_elem - np.sum(nonzeros)
        if num_zeros == 0:
            y1 = T
        elif num_zeros == n_elem:
            return (y, 0)
        else:
            R = (T - y) * Dinvs
            r = np.linalg.norm(R)
            rinv = 0 if r == 0 else num_zeros / r
            y1 = max(0, 1 - rinv) * T + min(1, rinv) * y

        if np.linalg.norm(y - y1) < tol:
            return (y1, 0)

        y = y1


StateGMOM = namedtuple(
    "StateGMOM",
    [
        "block_means",
        "sample_indices",
        "grads_sum_block",
        "gradient",
        "loss_derivative",
        "partial_derivative",
    ],
)


class GMOM(Estimator):
    def __init__(self, X, y, loss, n_classes, fit_intercept, n_samples_in_block):
        super().__init__(X, y, loss, n_classes, fit_intercept)
        self.n_samples_in_block = n_samples_in_block
        if n_samples_in_block <= 0:
            raise ValueError
        self.n_blocks = self.n_samples // n_samples_in_block
        self.last_block_size = self.n_samples % n_samples_in_block
        if self.last_block_size > 0:
            self.n_blocks += 1

    def get_state(self):
        return StateGMOM(
            block_means=np.empty(
                (
                    self.n_blocks,
                    self.n_features + int(self.fit_intercept),
                    self.n_classes,
                ),
                dtype=np_float,
            ),
            sample_indices=np.arange(self.n_samples, dtype=np.uintp),
            grads_sum_block=np.empty(
                (self.n_features + int(self.fit_intercept), self.n_classes),
                dtype=np_float,
            ),
            gradient=np.empty(
                (self.n_features + int(self.fit_intercept), self.n_classes),
                dtype=np_float,
            ),
            loss_derivative=np.empty(self.n_classes, dtype=np_float),
            partial_derivative=np.empty(self.n_classes, dtype=np_float),
        )

    def partial_deriv_factory(self):
        raise ValueError(
            "gmom estimator does not support CGD, use mom estimator instead"
        )

    def grad_factory(self):
        X = self.X
        y = self.y
        loss = self.loss
        deriv_loss = loss.deriv_factory()
        n_samples_in_block = self.n_samples_in_block
        n_classes = self.n_classes
        n_features = self.n_features
        n_blocks = self.n_blocks
        last_block_size = self.last_block_size

        if self.fit_intercept:

            @jit(**jit_kwargs)
            def grad(inner_products, state):
                sample_indices = state.sample_indices
                block_means = state.block_means
                gradient = state.gradient
                # Cumulative sum in the block
                grads_sum_block = state.grads_sum_block
                # for i in range(n_samples):
                #     sample_indices[i] = i
                np.random.shuffle(sample_indices)
                for j in range(n_features + 1):
                    for k in range(n_classes):
                        grads_sum_block[j, k] = 0.0
                # Block counter
                counter = 0
                deriv = state.loss_derivative
                for i, idx in enumerate(sample_indices):
                    deriv_loss(y[idx], inner_products[idx], deriv)
                    for k in range(n_classes):
                        grads_sum_block[0, k] += deriv[k]
                        for j in range(n_features):
                            grads_sum_block[j + 1, k] += (
                                X[idx, j] * deriv[k]
                            )  # np.outer(X[idx], deriv)
                    if ((i != 0) and ((i + 1) % n_samples_in_block == 0)) or n_samples_in_block == 1:
                        for j in range(n_features + 1):
                            for k in range(n_classes):
                                block_means[counter, j, k] = (
                                    grads_sum_block[j, k] / n_samples_in_block
                                )
                                grads_sum_block[j, k] = 0.0
                        counter += 1

                if last_block_size != 0:
                    for j in range(n_features + 1):
                        for k in range(n_classes):
                            block_means[counter, j, k] = (
                                grads_sum_block[j, k] / last_block_size
                            )

                # TODO : possible optimizations in the next line by rewriting
                #  gmom_njit with an out parameter and preallocated placeholders
                gmom_grad, sc_prods = gmom_njit(block_means.reshape((n_blocks, -1)))
                gradient[:] = gmom_grad.reshape(block_means.shape[1:])
                return sc_prods

            return grad
        else:

            @jit(**jit_kwargs)
            def grad(inner_products, state):
                sample_indices = state.sample_indices
                block_means = state.block_means
                gradient = state.gradient
                # Cumulative sum in the block
                grads_sum_block = state.grads_sum_block
                # for i in range(n_samples):
                #     sample_indices[i] = i
                np.random.shuffle(sample_indices)
                # Cumulative sum in the block
                for j in range(n_features):
                    for k in range(n_classes):
                        grads_sum_block[j, k] = 0.0
                # Block counter
                counter = 0
                deriv = state.loss_derivative
                for i, idx in enumerate(sample_indices):
                    deriv_loss(y[idx], inner_products[idx], deriv)
                    for j in range(n_features):
                        for k in range(n_classes):
                            grads_sum_block[j, k] += X[idx, j] * deriv[k]
                    if (i != 0) and ((i + 1) % n_samples_in_block == 0):
                        for j in range(n_features):
                            for k in range(n_classes):
                                block_means[counter, j, k] = (
                                    grads_sum_block[j, k] / n_samples_in_block
                                )
                                grads_sum_block[j, k] = 0.0
                        counter += 1

                if last_block_size != 0:
                    for j in range(n_features):
                        for k in range(n_classes):
                            block_means[counter, j, k] = (
                                grads_sum_block[j, k] / last_block_size
                            )

                # TODO : possible optimizations in the next line by rewriting
                #  gmom_njit with an out parameter and preallocated placeholders
                gmom_grad, sc_prods = gmom_njit(block_means.reshape((n_blocks, -1)))
                gradient[:] = gmom_grad.reshape(block_means.shape[1:])
                return sc_prods

            return grad

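A quick sketch of the geometric median routine on a toy point cloud. For collinear points the geometric median coincides with the coordinate-wise median, so the iteration should land near [1.0, 0.0]; the second return value is an internal operation count:

import numpy as np

pts = np.array([[0.0, 0.0], [1.0, 0.0], [10.0, 0.0]])
median, n_ops = gmom_njit(pts)
print(median)  # close to [1.0, 0.0], the point minimizing summed distances
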
#!/usr/bin/python3
# -*- coding:utf-8 -*-

from os import listdir
from os.path import splitext, join
import markdown
import yaml
import json


def read(uri):
    with open(uri, 'r') as f:
        return f.read()


def text_to_yml_md(tex):
    tex = tex.strip('-')
    sep = '---'
    if sep in tex:
        return tex.split(sep)
    else:
        return (None, tex)


def extract_info(path, mode, keys):
    list_files = listdir(path)
    resp = {}
    for file in list_files:
        n, e = splitext(file)
        if mode == 'yaml':
            info = yaml.safe_load(read(join(path, file)))
        if mode == 'markdown':
            yml, md = text_to_yml_md(read(join(path, file)))
            info = yaml.safe_load(yml)
        for k in keys:
            if n not in resp:
                resp[n] = {}
            if k in info:
                resp[n][k] = info[k]
    return resp


def main():
    file_dat = '../cfg/data-automatic.js'
    data = {
        'pages': extract_info('../public_html/dat/pages/', 'markdown', ['title']),
        'series': extract_info('../public_html/dat/series/', 'yaml', ['title-es', 'title-en']),
        #'news': extract_info('../public_html/dat/news/', 'markdown', ['title', 'date']),
        #'press': extract_info('../public_html/dat/press/', 'markdown', ['title', 'date']),
    }
    with open(file_dat, 'w') as f:
        json_tex = json.dumps(data)
        f.write(f'const data = {json_tex};')


if __name__ == '__main__':
    main()

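A small illustration of the front-matter split performed by text_to_yml_md. The sample document is made up; after the leading dashes are stripped, exactly one '---' separator remains, so the two-way unpack works:

doc = """---
title: Hello
---
# Body
Some *markdown* here.
"""
yml, md = text_to_yml_md(doc)
print(yaml.safe_load(yml))  # {'title': 'Hello'}
print(md.strip())           # the markdown body
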
#!/usr/bin/env python3
# Copyright (c) 2016 Fabian Schuiki
#
# This script generates GDS layout data for a memory macro.

import sys, os, argparse
from potstill.macro import Macro
from potstill.layout import Layout
from potstill.output.gds import make_gds, make_phalanx_input

# Parse the command line arguments.
parser = argparse.ArgumentParser(prog="potstill make-gds", description="Generate the GDS layout data of a memory macro.")
parser.add_argument("NADDR", type=int, help="number of address lines")
parser.add_argument("NBITS", type=int, help="number of bits per word")
parser.add_argument("-o", "--output", metavar="GDSFILE", type=str, help="name of the output GDS file")
parser.add_argument("-p", "--phalanx", action="store_true", help="write Phalanx input file to stdout")
args = parser.parse_args()

# Calculate the layout.
macro = Macro(args.NADDR, args.NBITS)
layout = Layout(macro)
filename = args.output or (macro.name + ".gds")

# Dump the input file to stdout if requested.
if args.phalanx:
    sys.stdout.write(make_phalanx_input(layout, filename))
    sys.exit(0)

# Generate GDS output.
make_gds(layout, filename)

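Example invocations, inferred from the argparse definitions above; the macro size and output name are arbitrary:

# GDS for a macro with 6 address lines and 32 bits per word:
#   potstill make-gds 6 32 -o ram_6x32.gds
# Print the Phalanx input file to stdout instead of writing GDS:
#   potstill make-gds 6 32 --phalanx
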
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities

__all__ = ['EnvironmentVariableArgs', 'EnvironmentVariable']


@pulumi.input_type
class EnvironmentVariableArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str],
                 locked: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a EnvironmentVariable resource.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
        if locked is not None:
            pulumi.set(__self__, "locked", locked)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)

    @property
    @pulumi.getter
    def locked(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "locked")

    @locked.setter
    def locked(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "locked", value)


@pulumi.input_type
class _EnvironmentVariableState:
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 locked: Optional[pulumi.Input[bool]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering EnvironmentVariable resources.
        """
        if key is not None:
            pulumi.set(__self__, "key", key)
        if locked is not None:
            pulumi.set(__self__, "locked", locked)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def locked(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "locked")

    @locked.setter
    def locked(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "locked", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)


class EnvironmentVariable(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 locked: Optional[pulumi.Input[bool]] = None,
                 value: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## Example Usage

        ```python
        import pulumi
        import pulumi_checkly as checkly

        # Simple Environment Variable example
        variable_1 = checkly.EnvironmentVariable("variable-1",
            key="API_KEY",
            locked=True,
            value="loZd9hOGHDUrGvmW")
        variable_2 = checkly.EnvironmentVariable("variable-2",
            key="API_URL",
            value="http://localhost:3000")
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: EnvironmentVariableArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Example Usage

        ```python
        import pulumi
        import pulumi_checkly as checkly

        # Simple Environment Variable example
        variable_1 = checkly.EnvironmentVariable("variable-1",
            key="API_KEY",
            locked=True,
            value="loZd9hOGHDUrGvmW")
        variable_2 = checkly.EnvironmentVariable("variable-2",
            key="API_URL",
            value="http://localhost:3000")
        ```

        :param str resource_name: The name of the resource.
        :param EnvironmentVariableArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(EnvironmentVariableArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       key: Optional[pulumi.Input[str]] = None,
                       locked: Optional[pulumi.Input[bool]] = None,
                       value: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.plugin_download_url is None:
            opts.plugin_download_url = _utilities.get_plugin_download_url()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = EnvironmentVariableArgs.__new__(EnvironmentVariableArgs)

            if key is None and not opts.urn:
                raise TypeError("Missing required property 'key'")
            __props__.__dict__["key"] = key
            __props__.__dict__["locked"] = locked
            if value is None and not opts.urn:
                raise TypeError("Missing required property 'value'")
            __props__.__dict__["value"] = value
        super(EnvironmentVariable, __self__).__init__(
            'checkly:index/environmentVariable:EnvironmentVariable',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            key: Optional[pulumi.Input[str]] = None,
            locked: Optional[pulumi.Input[bool]] = None,
            value: Optional[pulumi.Input[str]] = None) -> 'EnvironmentVariable':
        """
        Get an existing EnvironmentVariable resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _EnvironmentVariableState.__new__(_EnvironmentVariableState)

        __props__.__dict__["key"] = key
        __props__.__dict__["locked"] = locked
        __props__.__dict__["value"] = value
        return EnvironmentVariable(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Output[str]:
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def locked(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "locked")

    @property
    @pulumi.getter
    def value(self) -> pulumi.Output[str]:
        return pulumi.get(self, "value")

# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

# pylint: disable=line-too-long, too-many-instance-attributes
import logging
import timeit
import inspect

from knack.log import get_logger
from azure.cli.core.commands.client_factory import get_subscription_id

from .telemetry import _track_command_telemetry, _track_run_command_telemetry
from .repair_utils import _get_function_param_dict

STATUS_SUCCESS = 'SUCCESS'
STATUS_ERROR = 'ERROR'
VM_REPAIR_RUN_COMMAND = 'vm repair run'


class command_helper(object):
    """
    The command helper stores command state data and helper functions for vm-repair commands.
    It also executes needed functions at the start and end of commands, such as sending
    telemetry data and displaying the progress controller.
    """
    def __init__(self, logger, cmd, command_name):
        """ The command helper object should always be initialized at the start of a command """
        # Start timer for custom telemetry
        self.start_time = timeit.default_timer()
        # Fetch and store command parameters
        self.command_params = _get_function_param_dict(inspect.getouterframes(inspect.currentframe())[1].frame)
        # Logger
        self.logger = logger
        # CLI cmd object
        self.cmd = cmd
        # Command name
        self.command_name = command_name
        # Init script data if command is vm repair run
        if command_name == VM_REPAIR_RUN_COMMAND:
            self.script = script_data()
            self.script.run_id = self.command_params['run_id']
        # Return message
        self.message = ''
        # Return error message
        self.error_message = ''
        # Return Status: STATUS_SUCCESS | STATUS_ERROR
        self.status = ''
        # Error stack trace
        self.error_stack_trace = ''
        # Return dict
        self.return_dict = {}
        # Verbose flag for command
        self.is_verbose = any(handler.level == logging.INFO for handler in get_logger().handlers)

        # Begin progress reporting for long running operation if not verbose
        if not self.is_verbose:
            self.cmd.cli_ctx.get_progress_controller().begin()
            self.cmd.cli_ctx.get_progress_controller().add(message='Running')

    def __del__(self):
        """
        This object will have the same lifetime as an invoked command.
        We run all telemetry and clean-up work through the destructor.
        """
        # End long running op for process if not verbose
        if not self.is_verbose:
            self.cmd.cli_ctx.get_progress_controller().end()

        # Track telemetry data
        elapsed_time = timeit.default_timer() - self.start_time
        if self.command_name == VM_REPAIR_RUN_COMMAND:
            _track_run_command_telemetry(self.logger, self.command_name, self.command_params, self.status, self.message, self.error_message, self.error_stack_trace, elapsed_time, get_subscription_id(self.cmd.cli_ctx), self.return_dict, self.script.run_id, self.script.status, self.script.output, self.script.run_time)
        else:
            _track_command_telemetry(self.logger, self.command_name, self.command_params, self.status, self.message, self.error_message, self.error_stack_trace, elapsed_time, get_subscription_id(self.cmd.cli_ctx), self.return_dict)

    def set_status_success(self):
        """ Set command status to success """
        self.status = STATUS_SUCCESS

    def set_status_error(self):
        """ Set command status to error """
        self.status = STATUS_ERROR

    def is_status_success(self):
        return self.status == STATUS_SUCCESS

    def init_return_dict(self):
        """ Returns the command return dictionary """
        self.return_dict = {}
        self.return_dict["status"] = self.status
        self.return_dict["message"] = self.message
        if not self.is_status_success():
            self.return_dict["error_message"] = self.error_message
            if self.error_message:
                self.logger.error(self.error_message)
            if self.message:
                self.logger.error(self.message)

        return self.return_dict


class script_data(object):
    """ Stores repair script data. """
    def __init__(self):
        # Unique run-id
        self.run_id = ''
        # Script status
        self.status = ''
        # Script output
        self.output = ''
        # Script run time
        self.run_time = None

    def set_status_success(self):
        """ Set script status to success """
        self.status = STATUS_SUCCESS

    def set_status_error(self):
        """ Set script status to error """
        self.status = STATUS_ERROR

from . import base_api_core


class Backup(base_api_core.Core):
    def __init__(self, ip_address, port, username, password, secure=False,
                 cert_verify=False, dsm_version=2, debug=True):
        super(Backup, self).__init__(ip_address, port, username, password,
                                     secure, cert_verify, dsm_version, debug)

    def backup_repository_get(self):
        api_name = 'SYNO.Backup.Repository'
        info = self.core_list[api_name]
        api_path = info['path']
        req_param = {'version': info['maxVersion'], 'method': 'get'}
        return self.request_data(api_name, api_path, req_param)

    def backup_repository_list(self):
        api_name = 'SYNO.Backup.Repository'
        info = self.core_list[api_name]
        api_path = info['path']
        req_param = {'version': info['maxVersion'], 'method': 'list'}
        return self.request_data(api_name, api_path, req_param)

    def backup_task_list(self):
        api_name = 'SYNO.Backup.Task'
        info = self.core_list[api_name]
        api_path = info['path']
        req_param = {'version': info['maxVersion'], 'method': 'list'}
        return self.request_data(api_name, api_path, req_param)

    def backup_task_status(self):
        api_name = 'SYNO.Backup.Task'
        info = self.core_list[api_name]
        api_path = info['path']
        req_param = {'version': info['maxVersion'], 'method': 'status'}
        return self.request_data(api_name, api_path, req_param)

    def backup_task_get(self):
        api_name = 'SYNO.Backup.Task'
        info = self.core_list[api_name]
        api_path = info['path']
        req_param = {'version': info['maxVersion'], 'method': 'get'}
        return self.request_data(api_name, api_path, req_param)

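A usage sketch, assuming the shared base_api_core.Core constructor authenticates against a DSM host with the given address and credentials; all values below are placeholders:

backup = Backup('192.168.1.100', '5001', 'admin', 'secret', secure=True)
print(backup.backup_repository_list())
print(backup.backup_task_status())
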
""" Copyright (C) 2019 NetApp Inc. All rights reserved. A test module for the recline.repl.shell module """ import asyncio import builtins import pytest import recline from recline.repl import shell @pytest.mark.parametrize("user_input, expected_marker, expected_output", [ ("ut command -arg 2", 2, ""), ("ut command", None, "required: -arg"), ("ut command -arg 5", None, "This is a UT failure"), ("ut command -arg foo", None, "invalid int value"), ("bad command", None, "Unknown command"), ("ut command -arg 2 && ut command -arg 3", 3, ""), ("ut command; ut command -arg 3", 3, "required: -arg"), ("ut command -arg 5 && ut command -arg 2", None, "This is a UT failure"), ("ut command -arg 2 || ut command -arg 1", 2, ""), ("bad command; bad other command || ut command -arg 3", 3, "Unknown command"), ]) def test_shell_execute(user_input, expected_marker, expected_output, capsys): """Test that our shell can run one or more commands on input""" ut_marker = None @recline.command(name="ut command") def ut_command(arg: int): # pylint: disable=unused-variable if arg == 5: raise ValueError("This is a UT failure") nonlocal ut_marker ut_marker = arg shell.execute(user_input) assert ut_marker == expected_marker captured = capsys.readouterr() assert expected_output in captured.out + captured.err @pytest.mark.parametrize("user_input, expected_marker", [ ("ut async command -arg 2", 2), ("ut async command -arg 30", 30), ]) def test_shell_execute_async_command(user_input, expected_marker): """Verify we can run async commands as well""" ut_marker = None @recline.command(name="ut async command") async def ut_command(arg: int): # pylint: disable=unused-variable loops = 0 while loops < arg: loops += 1 await asyncio.sleep(0.001) nonlocal ut_marker ut_marker = arg shell.execute(user_input) assert ut_marker == expected_marker def test_run_startup_exit_command(monkeypatch): """Verify that a command which is marked to run at startup or exit gets run""" startup_command_ran = False recline.commands.START_COMMAND = None def mock_eof(prompt): raise EOFError("UT is finished") monkeypatch.setattr(builtins, "input", mock_eof) @recline.command(atstart=True) def startup(): # pylint: disable=unused-variable nonlocal startup_command_ran startup_command_ran = True with pytest.raises(SystemExit): shell.relax(argv=["ut_program"]) assert startup_command_ran recline.commands.START_COMMAND = None @pytest.mark.parametrize("motd, expected", [ ("This is a simple message", "This is a simple message"), (lambda: "This is a dynamic message", "This is a dynamic message"), ]) def test_run_motd(motd, expected, monkeypatch, capsys): """Verify the MOTD gets printed if one is provided""" def mock_eof(prompt): raise EOFError("UT is finished") monkeypatch.setattr(builtins, "input", mock_eof) with pytest.raises(SystemExit): shell.relax(argv=["ut_program"], motd=motd) captured = capsys.readouterr() assert expected in captured.out def test_run_with_dash_c(): """Verify only a single command is run when -c is passed in""" @recline.command(name="single command") def single_command(): # pylint: disable=unused-variable return 73 assert shell.relax(argv=["ut_program", "-c", "single", "command"]) == 73 def test_run_non_repl(): """Verify that if a program is not trying to be a repl, then we will parse a command from the input and exit """ @recline.command(name="single command") def single_command(): # pylint: disable=unused-variable return 73 assert shell.relax(argv=["ut_program", "single", "command"], repl_mode=False) == 73 def test_run_single_command(): """Verify 
that if a program is not trying to be a repl, then we will parse a command from the input and exit """ @recline.command(name="single command") def single_command(): # pylint: disable=unused-variable return 73 assert shell.relax(argv=["ut_program"], single_command="single command") == 73
#!/usr/bin/env python #-*- coding:utf-8 -*- ## ## setup.py ## ## Created on: Jan 23, 2018 ## Author: Alexey S. Ignatiev ## E-mail: aignatiev@ciencias.ulisboa.pt ## # #============================================================================== import os import os.path import contextlib import glob try: from setuptools import setup, Extension HAVE_SETUPTOOLS = True except ImportError: from distutils.core import setup, Extension HAVE_SETUPTOOLS = False import distutils.command.build import distutils.command.install import inspect, os, sys sys.path.insert(0, os.path.join(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])), 'solvers/')) import platform import prepare from pysat import __version__ # #============================================================================== @contextlib.contextmanager def chdir(new_dir): old_dir = os.getcwd() try: os.chdir(new_dir) yield finally: os.chdir(old_dir) # #============================================================================== ROOT = os.path.abspath(os.path.dirname(__file__)) LONG_DESCRIPTION = """ A Python library providing a simple interface to a number of state-of-art Boolean satisfiability (SAT) solvers and a few types of cardinality and pseudo-Boolean encodings. The purpose of PySAT is to enable researchers working on SAT and its applications and generalizations to easily prototype with SAT oracles in Python while exploiting incrementally the power of the original low-level implementations of modern SAT solvers. With PySAT it should be easy for you to implement a MaxSAT solver, an MUS/MCS extractor/enumerator, or any tool solving an application problem with the (potentially multiple) use of a SAT oracle. Details can be found at `https://pysathq.github.io <https://pysathq.github.io>`__. """ # solvers to install #============================================================================== to_install = ['cadical', 'glucose30', 'glucose41', 'lingeling', 'maplechrono', 'maplecm', 'maplesat', 'minicard', 'minisat22', 'minisatgh'] # example scripts to install as standalone executables #============================================================================== scripts = ['fm', 'genhard', 'lbx', 'lsu', 'mcsls', 'models', 'musx', 'rc2'] # we need to redefine the build command to # be able to download and compile solvers #============================================================================== class build(distutils.command.build.build): """ Our custom builder class. """ def run(self): """ Download, patch and compile SAT solvers before building. 
""" # download and compile solvers prepare.do(to_install) # now, do standard build distutils.command.build.build.run(self) # compilation flags for C extensions #============================================================================== compile_flags, cpplib = ['-std=c++11', '-Wall', '-Wno-deprecated'], ['stdc++'] if platform.system() == 'Darwin': compile_flags += ['--stdlib=libc++'] cpplib = ['c++'] elif platform.system() == 'Windows': compile_flags = ['-DNBUILD', '-DNLGLYALSAT' , '/DINCREMENTAL', '-DNLGLOG', '-DNDEBUG', '-DNCHKSOL', '-DNLGLFILES', '-DNLGLDEMA', '/experimental:preprocessor', '-I./zlib'] cpplib = [] # C extensions: pycard and pysolvers #============================================================================== pycard_ext = Extension('pycard', sources=['cardenc/pycard.cc'], extra_compile_args=compile_flags, include_dirs=['cardenc'] , language='c++', libraries=cpplib, library_dirs=[] ) pysolvers_sources = ['solvers/pysolvers.cc'] if platform.system() == 'Windows': with chdir('solvers'): for solver in to_install: with chdir(solver): for filename in glob.glob('*.c*'): pysolvers_sources += ['solvers/%s/%s' % (solver, filename)] for filename in glob.glob('*/*.c*'): pysolvers_sources += ['solvers/%s/%s' % (solver, filename)] libraries = [] library_dirs = [] else: libraries = to_install + cpplib library_dirs = list(map(lambda x: os.path.join('solvers', x), to_install)) pysolvers_ext = Extension('pysolvers', sources=pysolvers_sources, extra_compile_args=compile_flags + \ list(map(lambda x: '-DWITH_{0}'.format(x.upper()), to_install)), include_dirs=['solvers'], language='c++', libraries=libraries, library_dirs=library_dirs ) # finally, calling standard setuptools.setup() (or distutils.core.setup()) #============================================================================== setup(name='python-sat', packages=['pysat', 'pysat.examples'], package_dir={'pysat.examples': 'examples'}, version=__version__, description='A Python library for prototyping with SAT oracles', long_description=LONG_DESCRIPTION, long_description_content_type='text/x-rst; charset=UTF-8', license='MIT', author='Alexey Ignatiev, Joao Marques-Silva, Antonio Morgado', author_email='aignatiev@ciencias.ulisboa.pt, jpms@ciencias.ulisboa.pt, ajmorgado@ciencias.ulisboa.pt', url='https://github.com/pysathq/pysat', ext_modules=[pycard_ext, pysolvers_ext], scripts=['examples/{0}.py'.format(s) for s in scripts], cmdclass={'build': build}, install_requires=['six'], extras_require = { 'aiger': ['py-aiger-cnf>=2.0.0'], 'pblib': ['pypblib>=0.0.3'] } )
nilq/baby-python
python
# pylint: disable=missing-module-docstring # -*- coding: utf-8 -*- __short_version__ = '1.6' __release__ = '1.6.0' __description__ = 'Framework for Quart to add swagger generation to routes and restful resources'
nilq/baby-python
python
""" This module contains common code shared by utils/rule_dir_stats.py and utils/rule_dir_diff.py. This code includes functions for walking the output of the utils/rule_dir_json.py script, and filtering functions used in both scripts. """ from __future__ import absolute_import from __future__ import print_function import os from collections import defaultdict from .build_remediations import REMEDIATION_TO_EXT_MAP as REMEDIATION_MAP from .utils import subset_dict def get_affected_products(rule_obj): """ From a rule_obj, return the set of affected products from rule.yml """ return set(rule_obj['products']) def get_all_affected_products(args, rule_obj): """ From a rule_obj, return the set of affected products from rule.yml, and all fixes and checks. If args.strict is set, this function is equivalent to get_affected_products. Otherwise, it includes ovals and fix content based on the values of args.fixes_only and args.ovals_only. """ affected_products = get_affected_products(rule_obj) if args.strict: return affected_products if not args.fixes_only: for product in rule_obj['oval_products']: affected_products.add(product) if not args.ovals_only: for product in rule_obj['remediation_products']: affected_products.add(product) return affected_products def _walk_rule(args, rule_obj, oval_func, remediation_func, verbose_output): """ Walks a single rule and updates verbose_output if visited. Returns visited state as a boolean. Internal function for walk_rules and walk_rules_parallel. """ rule_id = rule_obj['id'] affected_products = get_all_affected_products(args, rule_obj) if not affected_products.intersection(args.products): return False if args.query and rule_id not in args.query: return False if not args.fixes_only: result = oval_func(rule_obj) if result: verbose_output[rule_id]['oval'] = result if not args.ovals_only: for r_type in REMEDIATION_MAP: result = remediation_func(rule_obj, r_type) if result: verbose_output[rule_id][r_type] = result return True def walk_rules(args, known_rules, oval_func, remediation_func): """ Walk a dictionary of known_rules, returning the number of visited rules and the output at each visited rule, conditionally calling oval_func and remediation_func based on the values of args.fixes_only and args.ovals_only. If the result of these functions are not Falsy, set the appropriate output content. The input rule_obj structure is the value of known_rules[rule_id]. The output structure is a dict as follows: { rule_id: { "oval": oval_func(args, rule_obj), "ansible": remediation_func(args, "ansible", rule_obj), "anaconda": remediation_func(args, "anaconda", rule_obj), "bash": remediation_func(args, "bash", rule_obj), "puppet": remediation_func(args, "puppet", rule_obj) }, ... } The arguments supplied to oval_func are args and rule_obj. The arguments supplied to remediation_func are args, the remediation type, and rule_obj. """ affected_rules = 0 verbose_output = defaultdict(lambda: defaultdict(lambda: None)) for rule_id in known_rules: rule_obj = known_rules[rule_id] if _walk_rule(args, rule_obj, oval_func, remediation_func, verbose_output): affected_rules += 1 return affected_rules, verbose_output def walk_rule_stats(rule_output): """ Walk the output of a rule, generating statistics about affected ovals, remediations, and generating verbose output in a stable order. 
Returns a tuple of (affected_ovals, affected_remediations, all_affected_remediations, affected_remediations_type, all_output) """ affected_ovals = 0 affected_remediations = 0 all_affected_remediations = 0 affected_remediations_type = defaultdict(lambda: 0) all_output = [] affected_remediation = False all_remedation = True if 'oval' in rule_output: affected_ovals += 1 all_output.append(rule_output['oval']) for r_type in sorted(REMEDIATION_MAP): if r_type in rule_output: affected_remediation = True affected_remediations_type[r_type] += 1 all_output.append(rule_output[r_type]) else: all_remedation = False if affected_remediation: affected_remediations += 1 if all_remedation: all_affected_remediations += 1 return (affected_ovals, affected_remediations, all_affected_remediations, affected_remediations_type, all_output) def walk_rules_stats(args, known_rules, oval_func, remediation_func): """ Walk a dictionary of known_rules and generate simple aggregate statistics for all visited rules. The oval_func and remediation_func arguments behave according to walk_rules(). Returned values are visited_rules, affected_ovals, affected_remediation, a dictionary containing all fix types and the quantity of affected fixes, and the ordered output of all functions. An effort is made to provide consistently ordered verbose_output by sorting all visited keys and the keys of ssg.build_remediations.REMEDIATION_MAP. """ affected_rules, verbose_output = walk_rules(args, known_rules, oval_func, remediation_func) affected_ovals = 0 affected_remediations = 0 all_affected_remediations = 0 affected_remediations_type = defaultdict(lambda: 0) all_output = [] for rule_id in sorted(verbose_output): rule_output = verbose_output[rule_id] results = walk_rule_stats(rule_output) affected_ovals += results[0] affected_remediations += results[1] all_affected_remediations += results[2] for key in results[3]: affected_remediations_type[key] += results[3][key] all_output.extend(results[4]) return (affected_rules, affected_ovals, affected_remediations, all_affected_remediations, affected_remediations_type, all_output) def walk_rules_parallel(args, left_rules, right_rules, oval_func, remediation_func): """ Walks two sets of known_rules (left_rules and right_rules) with identical keys and returns left_only, right_only, and common_only output from _walk_rule. If the outputted data for a rule when called on left_rules and right_rules is the same, it is added to common_only. Only rules which output different data will have their data added to left_only and right_only respectively. Can assert. 
""" left_affected_rules = 0 right_affected_rules = 0 common_affected_rules = 0 left_verbose_output = defaultdict(lambda: defaultdict(lambda: None)) right_verbose_output = defaultdict(lambda: defaultdict(lambda: None)) common_verbose_output = defaultdict(lambda: defaultdict(lambda: None)) assert set(left_rules) == set(right_rules) for rule_id in left_rules: left_rule_obj = left_rules[rule_id] right_rule_obj = right_rules[rule_id] if left_rule_obj == right_rule_obj: if _walk_rule(args, left_rule_obj, oval_func, remediation_func, common_verbose_output): common_affected_rules += 1 else: left_temp = defaultdict(lambda: defaultdict(lambda: None)) right_temp = defaultdict(lambda: defaultdict(lambda: None)) left_ret = _walk_rule(args, left_rule_obj, oval_func, remediation_func, left_temp) right_ret = _walk_rule(args, right_rule_obj, oval_func, remediation_func, right_temp) if left_ret == right_ret and left_temp == right_temp: common_verbose_output.update(left_temp) if left_ret: common_affected_rules += 1 else: left_verbose_output.update(left_temp) right_verbose_output.update(right_temp) if left_ret: left_affected_rules += 1 if right_ret: right_affected_rules += 1 left_only = (left_affected_rules, left_verbose_output) right_only = (right_affected_rules, right_verbose_output) common_only = (common_affected_rules, common_verbose_output) return left_only, right_only, common_only def walk_rules_diff(args, left_rules, right_rules, oval_func, remediation_func): """ Walk a two dictionary of known_rules (left_rules and right_rules) and generate five sets of output: left_only rules output, right_only rules output, shared left output, shared right output, and shared common output, as a five-tuple, where each tuple element is equivalent to walk_rules on the appropriate set of rules. Does not understand renaming of rule_ids as this would depend on disk content to reflect these differences. Unless significantly more data is added to the rule_obj structure (contents of rule.yml, ovals, remediations, etc.), all information besides 'title' is not uniquely identifying or could be easily updated. """ left_rule_ids = set(left_rules) right_rule_ids = set(right_rules) left_only_rule_ids = left_rule_ids.difference(right_rule_ids) right_only_rule_ids = right_rule_ids.difference(left_rule_ids) common_rule_ids = left_rule_ids.intersection(right_rule_ids) left_restricted = subset_dict(left_rules, left_only_rule_ids) left_common = subset_dict(left_rules, common_rule_ids) right_restricted = subset_dict(right_rules, right_only_rule_ids) right_common = subset_dict(right_rules, common_rule_ids) left_only_data = walk_rules(args, left_restricted, oval_func, remediation_func) right_only_data = walk_rules(args, right_restricted, oval_func, remediation_func) l_c_d, r_c_d, c_d = walk_rules_parallel(args, left_common, right_common, oval_func, remediation_func) left_changed_data = l_c_d right_changed_data = r_c_d common_data = c_d return (left_only_data, right_only_data, left_changed_data, right_changed_data, common_data) def walk_rules_diff_stats(results): """ Takes the results of walk_rules_diff (results) and generates five sets of output statistics: left_only rules output, right_only rules output, shared left output, shared right output, and shared common output, as a five-tuple, where each tuple element is equivalent to walk_rules_stats on the appropriate set of rules. Can assert. 
""" assert len(results) == 5 output_data = [] for data in results: affected_rules, verbose_output = data affected_ovals = 0 affected_remediations = 0 all_affected_remediations = 0 affected_remediations_type = defaultdict(lambda: 0) all_output = [] for rule_id in sorted(verbose_output): rule_output = verbose_output[rule_id] _results = walk_rule_stats(rule_output) affected_ovals += _results[0] affected_remediations += _results[1] all_affected_remediations += _results[2] for key in _results[3]: affected_remediations_type[key] += _results[3][key] all_output.extend(_results[4]) output_data.append((affected_rules, affected_ovals, affected_remediations, all_affected_remediations, affected_remediations_type, all_output)) assert len(output_data) == 5 return tuple(output_data) def filter_rule_ids(all_keys, queries): """ From a set of queries (a comma separated list of queries, where a query is either a rule id or a substring thereof), return the set of matching keys from all_keys. When queries is the literal string "all", return all of the keys. """ if not queries: return set() if queries == 'all': return set(all_keys) # We assume that all_keys is much longer than queries; this allows us to do # len(all_keys) iterations of size len(query_parts) instead of len(query_parts) # queries of size len(all_keys) -- which hopefully should be a faster data access # pattern due to caches but in reality shouldn't matter. Note that we have to iterate # over the keys in all_keys either way, because we wish to check whether query is a # substring of a key, not whether query is a key. # # This does have the side-effect of not having the results be ordered according to # their order in query_parts, so we instead, we intentionally discard order by using # a set. This also guarantees that our results are unique. results = set() query_parts = queries.split(',') for key in all_keys: for query in query_parts: if query in key: results.add(key) return results def missing_oval(rule_obj): """ For a rule object, check if it is missing an oval. """ rule_id = rule_obj['id'] check = len(rule_obj['ovals']) > 0 if not check: return "\trule_id:%s is missing all OVALs" % rule_id def missing_remediation(rule_obj, r_type): """ For a rule object, check if it is missing a remediation of type r_type. """ rule_id = rule_obj['id'] check = len(rule_obj['remediations'][r_type]) > 0 if not check: return "\trule_id:%s is missing %s remediations" % (rule_id, r_type) def two_plus_oval(rule_obj): """ For a rule object, check if it has two or more OVALs. """ rule_id = rule_obj['id'] check = len(rule_obj['ovals']) >= 2 if check: return "\trule_id:%s has two or more OVALs: %s" % (rule_id, ','.join(rule_obj['ovals'])) def two_plus_remediation(rule_obj, r_type): """ For a rule object, check if it has two or more remediations of type r_type. """ rule_id = rule_obj['id'] check = len(rule_obj['remediations'][r_type]) >= 2 if check: return "\trule_id:%s has two or more %s remediations: %s" % \ (rule_id, r_type, ','.join(rule_obj['remediations'][r_type])) def prodtypes_oval(rule_obj): """ For a rule object, check if the prodtypes match between the YAML and the OVALs. 
""" rule_id = rule_obj['id'] rule_products = set(rule_obj['products']) if not rule_products: return oval_products = set() for oval in rule_obj['ovals']: oval_products.update(rule_obj['ovals'][oval]['products']) if not oval_products: return sym_diff = sorted(rule_products.symmetric_difference(oval_products)) check = len(sym_diff) > 0 if check: return "\trule_id:%s has a different prodtypes between YAML and OVALs: %s" % \ (rule_id, ','.join(sym_diff)) def prodtypes_remediation(rule_obj, r_type): """ For a rule object, check if the prodtypes match between the YAML and the remediations of type r_type. """ rule_id = rule_obj['id'] rule_products = set(rule_obj['products']) if not rule_products: return remediation_products = set() for remediation in rule_obj['remediations'][r_type]: remediation_products.update(rule_obj['remediations'][r_type][remediation]['products']) if not remediation_products: return sym_diff = sorted(rule_products.symmetric_difference(remediation_products)) check = len(sym_diff) > 0 and rule_products and remediation_products if check: return "\trule_id:%s has a different prodtypes between YAML and %s remediations: %s" % \ (rule_id, r_type, ','.join(sym_diff)) def product_names_oval(rule_obj): """ For a rule_obj, check the scope of the platforms versus the product name of the OVAL objects. """ rule_id = rule_obj['id'] for oval_name in rule_obj['ovals']: if oval_name == "shared.xml": continue oval_product, _ = os.path.splitext(oval_name) for product in rule_obj['ovals'][oval_name]['products']: if product != oval_product: return "\trule_id:%s has a different product and OVALs names: %s is not %s" % \ (rule_id, product, oval_product) def product_names_remediation(rule_obj, r_type): """ For a rule_obj, check the scope of the platforms versus the product name of the remediations of type r_type. """ rule_id = rule_obj['id'] for r_name in rule_obj['remediations'][r_type]: r_product, _ = os.path.splitext(r_name) if r_product == "shared": continue for product in rule_obj['remediations'][r_type][r_name]['products']: if product != r_product: return "\trule_id:%s has a different product and %s remediation names: %s is not %s" % \ (rule_id, r_type, product, r_product)
nilq/baby-python
python
class Database(Exception):
    """Base class for database-related errors."""


class Serialize(Database):
    def __init__(self, cls, msg="Serialization Failed"):
        super().__init__(msg)
        self.cls = cls
        self.msg = msg

    def __str__(self) -> str:
        return f"'{self.cls}' {self.msg}"
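if __name__ == "__main__":
    # A minimal usage sketch: raising Serialize with a class name produces
    # the message rendered by __str__ above.
    try:
        raise Serialize("User")
    except Database as exc:
        print(exc)  # -> 'User' Serialization Failed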
nilq/baby-python
python
from sqlalchemy import Column, Integer, String, Sequence, SmallInteger from sgs_schema.declarative_base import Base from sqlalchemy.orm import relationship from sqlalchemy.sql.schema import ForeignKey from sqlalchemy.sql.sqltypes import Float class Produto(Base): __tablename__ = "PRODUTO" id = Column(Integer, Sequence("PRODUTO_ID_GEN"), primary_key=True) codigo = Column(String(20)) codbarra = Column(String(50)) descricao = Column(String(250)) id_categoria = Column(Integer, ForeignKey("CATPRODUTO.id"), nullable=False) categoria = relationship("CategoriaProduto") id_unidade = Column(Integer) #TODO: unity = relationship("ItemUnity") custo = Column(Float) precovenda = Column(Float, default=0) precovenda2 = Column(Float, default=0) precovenda3 = Column(Float, default=0) id_unidade_venda = Column(Float) #TODO: unity_sell = relationship("ItemUnity") vende_sem_estoque = Column(Integer, default=0) #TODO: balanca = None fator_un_venda = Column(Integer, default=1) marca = Column(String(50)) para_revenda = Column(SmallInteger) id_moeda = Column(Integer, default=1) inativo = Column(Integer, default=0) class CategoriaProduto(Base): __tablename__ = "CATPRODUTO" id = Column(Integer, Sequence('id_manager'), primary_key=True) descricao = Column(String(50)) tem_aprovacao = Column(Integer, default=0) id_owner = Column(Integer, ForeignKey("CATPRODUTO.id"))
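# A minimal usage sketch; the connection URL and field values below are
# hypothetical, and any SQLAlchemy-supported database would do:
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   engine = create_engine("sqlite:///sgs.db")
#   Base.metadata.create_all(engine)
#   session = sessionmaker(bind=engine)()
#   categoria = CategoriaProduto(descricao="Bebidas")
#   session.add(categoria)
#   session.flush()
#   session.add(Produto(codigo="P001", descricao="Suco de Laranja",
#                       id_categoria=categoria.id, custo=2.5, precovenda=4.0))
#   session.commit()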
nilq/baby-python
python
# SPDX-FileCopyrightText: 2019 Scott Shawcroft for Adafruit Industries # # SPDX-License-Identifier: MIT """ `adafruit_il91874` ================================================================================ CircuitPython `displayio` driver for IL91874-based ePaper displays * Author(s): Scott Shawcroft Implementation Notes -------------------- **Hardware:** * `Adafruit 2.7" Tri-Color ePaper Display Shield <https://www.adafruit.com/product/4229>`_ **Software and Dependencies:** * Adafruit CircuitPython firmware for the supported boards: https://github.com/adafruit/circuitpython/releases """ import displayio __version__ = "0.0.0-auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_IL91874.git" _START_SEQUENCE = ( b"\x04\x00" # Power on b"\x00\x01\xaf" # panel setting b"\x30\x01\x3a" # PLL b"\x01\x05\x03\x00\x2b\x2b\x09" # power setting b"\x06\x03\x07\x07\x17" # booster soft start b"\xf8\x02\x60\xa5" # mystery command in example code b"\xf8\x02\x89\xa5" # mystery command in example code b"\xf8\x02\x90\x00" # mystery command in example code b"\xf8\x02\x93\xa2" # mystery command in example code b"\xf8\x02\x73\x41" # mystery command in example code b"\x82\x01\x12" # VCM DC b"\x50\x01\x87" # CDI setting # Look Up Tables # LUT1 b"\x20\x2c\x00\x00\x00\x1a\x1a\x00\x00\x01\x00\x0a\x0a\x00\x00\x08\x00\x0e\x01\x0e\x01\x10\x00" b"\x0a\x0a\x00\x00\x08\x00\x04\x10\x00\x00\x05\x00\x03\x0e\x00\x00\x0a\x00\x23\x00\x00\x00\x01" # LUTWW b"\x21\x2a\x90\x1a\x1a\x00\x00\x01\x40\x0a\x0a\x00\x00\x08\x84\x0e\x01\x0e\x01\x10\x80\x0a\x0a" b"\x00\x00\x08\x00\x04\x10\x00\x00\x05\x00\x03\x0e\x00\x00\x0a\x00\x23\x00\x00\x00\x01" # LUTBW b"\x22\x2a\xa0\x1a\x1a\x00\x00\x01\x00\x0a\x0a\x00\x00\x08\x84\x0e\x01\x0e\x01\x10\x90\x0a\x0a" b"\x00\x00\x08\xb0\x04\x10\x00\x00\x05\xb0\x03\x0e\x00\x00\x0a\xc0\x23\x00\x00\x00\x01" # LUTWB b"\x23\x2a\x90\x1a\x1a\x00\x00\x01\x40\x0a\x0a\x00\x00\x08\x84\x0e\x01\x0e\x01\x10\x80\x0a\x0a" b"\x00\x00\x08\x00\x04\x10\x00\x00\x05\x00\x03\x0e\x00\x00\x0a\x00\x23\x00\x00\x00\x01" # LUTBB b"\x24\x2a\x90\x1a\x1a\x00\x00\x01\x20\x0a\x0a\x00\x00\x08\x84\x0e\x01\x0e\x01\x10\x10\x0a\x0a" b"\x00\x00\x08\x00\x04\x10\x00\x00\x05\x00\x03\x0e\x00\x00\x0a\x00\x23\x00\x00\x00\x01" b"\x61\x04\x00\x00\x00\x00" # Resolution b"\x16\x80\x00" # PDRF ) _STOP_SEQUENCE = b"\x02\x01\x17" # Power off # pylint: disable=too-few-public-methods class IL91874(displayio.EPaperDisplay): """IL91874 display driver""" def __init__(self, bus, **kwargs): start_sequence = bytearray(_START_SEQUENCE) width = kwargs["width"] height = kwargs["height"] if "rotation" in kwargs and kwargs["rotation"] % 180 != 0: width, height = height, width start_sequence[-7] = (width >> 8) & 0xFF start_sequence[-6] = width & 0xFF start_sequence[-5] = (height >> 8) & 0xFF start_sequence[-4] = height & 0xFF super().__init__( bus, start_sequence, _STOP_SEQUENCE, **kwargs, ram_width=320, ram_height=300, busy_state=False, write_black_ram_command=0x10, black_bits_inverted=True, write_color_ram_command=0x13, refresh_display_command=0x12, always_toggle_chip_select=True, )
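# A minimal usage sketch; the pin assignments are assumptions for generic
# wiring, not taken from this module -- consult the product guide for
# board-specific pins:
#
#   import board
#   import displayio
#   import adafruit_il91874
#
#   displayio.release_displays()
#   display_bus = displayio.FourWire(board.SPI(), command=board.D10,
#                                    chip_select=board.D9, baudrate=1000000)
#   display = adafruit_il91874.IL91874(display_bus, width=264, height=176,
#                                      highlight_color=0xFF0000, rotation=90)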
nilq/baby-python
python
# pylint: disable=C0111,R0903 """Print the branch and git status for the currently focused window. Requires: * xcwd * Python module 'pygit2' """ import os import pygit2 import core.module import util.cli class Module(core.module.Module): def __init__(self, config, theme): super().__init__(config, theme, []) self.__error = False def hidden(self): return self.__error def update(self): state = {} self.clear_widgets() try: directory = util.cli.execute("xcwd").strip() directory = self.__get_git_root(directory) repo = pygit2.Repository(directory) self.add_widget(name="git.main", full_text=repo.head.shorthand) for filepath, flags in repo.status().items(): if ( flags == pygit2.GIT_STATUS_WT_NEW or flags == pygit2.GIT_STATUS_INDEX_NEW ): state["new"] = True if ( flags == pygit2.GIT_STATUS_WT_DELETED or flags == pygit2.GIT_STATUS_INDEX_DELETED ): state["deleted"] = True if ( flags == pygit2.GIT_STATUS_WT_MODIFIED or flags == pygit2.GIT_STATUS_INDEX_MODIFIED ): state["modified"] = True self.__error = False if "new" in state: self.add_widget(name="git.new") if "modified" in state: self.add_widget(name="git.modified") if "deleted" in state: self.add_widget(name="git.deleted") except Exception as e: self.__error = True def state(self, widget): return widget.name.split(".")[1] def __get_git_root(self, directory): while len(directory) > 1: if os.path.exists(os.path.join(directory, ".git")): return directory directory = "/".join(directory.split("/")[0:-1]) return "/" # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
nilq/baby-python
python
from pathlib import Path import typer from spacy.tokens import DocBin import spacy ASSETS_DIR = Path(__file__).parent.parent / "assets" CORPUS_DIR = Path(__file__).parent.parent / "corpus" def read_categories(path: Path): return path.open().read().strip().split("\n") def read_tsv(file_): for line in file_: text, labels, annotator = line.split("\t") yield { "text": text, "labels": [int(label) for label in labels.split(",") if label != ''], "annotator": annotator } def convert_record(nlp, record, categories): """Convert a record from the tsv into a spaCy Doc object.""" doc = nlp.make_doc(record["text"]) # All categories other than the true ones get value 0 doc.cats = {category: 0 for category in categories} # True labels get value 1 for label in record["labels"]: doc.cats[categories[label]] = 1 return doc def main(assets_dir: Path=ASSETS_DIR, corpus_dir: Path=CORPUS_DIR, lang: str="en"): """Convert the GoEmotion corpus's tsv files to spaCy's binary format.""" categories = read_categories(assets_dir / "categories.txt") nlp = spacy.blank(lang) for tsv_file in assets_dir.iterdir(): if not tsv_file.parts[-1].endswith(".tsv"): continue records = read_tsv(tsv_file.open(encoding="utf8")) docs = [convert_record(nlp, record, categories) for record in records] out_file = corpus_dir / tsv_file.with_suffix(".spacy").parts[-1] out_data = DocBin(docs=docs).to_bytes() with out_file.open("wb") as file_: file_.write(out_data) if __name__ == "__main__": typer.run(main)
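# Example invocations; typer generates the CLI from main()'s signature, and
# the script filename below is a placeholder for wherever this file lives:
#
#   python convert_corpus.py
#   python convert_corpus.py --lang en --assets-dir assets --corpus-dir corpus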
nilq/baby-python
python
# -*- coding: utf-8 -*- from httoop.exceptions import InvalidURI from httoop.messages import Request, Response from httoop.parser import NOT_RECEIVED_YET, StateMachine from httoop.status import ( BAD_REQUEST, HTTP_VERSION_NOT_SUPPORTED, LENGTH_REQUIRED, MOVED_PERMANENTLY, SWITCHING_PROTOCOLS, URI_TOO_LONG, ) from httoop.util import Unicode, _ from httoop.version import ServerHeader, ServerProtocol class ServerStateMachine(StateMachine): Message = Request HTTP2 = None def __init__(self, scheme, host, port): super(ServerStateMachine, self).__init__() self.MAX_URI_LENGTH = float('inf') # 8000 self._default_scheme = scheme self._default_host = host self._default_port = port self.request = None self.response = None def on_message_started(self): super(ServerStateMachine, self).on_message_started() self.response = Response() self.request = self.message self.state.update(dict( method=False, uri=False )) def on_message_complete(self): request = super(ServerStateMachine, self).on_message_complete() response = self.response self.request = None self.response = None return (request, response) def parse_startline(self): state = super(ServerStateMachine, self).parse_startline() if state is NOT_RECEIVED_YET: self._check_uri_max_length(self.buffer) return state def on_startline_complete(self): self.state['method'] = True self.on_method_complete() self.state['uri'] = True self.on_uri_complete() super(ServerStateMachine, self).on_startline_complete() def on_uri_complete(self): super(ServerStateMachine, self).on_uri_complete() self._check_uri_max_length(bytes(self.request.uri)) self.sanitize_request_uri_path() self.validate_request_uri_scheme() self.set_server_response_header() def on_protocol_complete(self): super(ServerStateMachine, self).on_protocol_complete() self.check_request_protocol() self.set_response_protocol() def on_headers_complete(self): self.check_host_header_exists() self.set_request_uri_host() self.check_http2_upgrade() super(ServerStateMachine, self).on_headers_complete() def on_body_complete(self): self.check_message_without_body_containing_data() super(ServerStateMachine, self).on_body_complete() self.check_methods_without_body() def check_request_protocol(self): # check if we speak the same major HTTP version if self.message.protocol > ServerProtocol: # the major HTTP version differs raise HTTP_VERSION_NOT_SUPPORTED('The server only supports HTTP/1.0 and HTTP/1.1.') def set_response_protocol(self): # set appropriate response protocol version self.response.protocol = min(self.message.protocol, ServerProtocol) def _check_uri_max_length(self, uri): if len(uri) > self.MAX_URI_LENGTH: raise URI_TOO_LONG( u'The maximum length of the request is %d' % self.MAX_URI_LENGTH ) def sanitize_request_uri_path(self): path = self.message.uri.path self.message.uri.normalize() if path != self.message.uri.path: raise MOVED_PERMANENTLY(self.message.uri.path.encode('UTF-8')) def validate_request_uri_scheme(self): if self.message.uri.scheme: if self.message.uri.scheme not in ('http', 'https'): # pragma: no cover exc = InvalidURI(_(u'Invalid URL: wrong scheme')) raise BAD_REQUEST(Unicode(exc)) else: self.message.uri.scheme = self._default_scheme self.message.uri.host = self._default_host self.message.uri.port = self._default_port def set_server_response_header(self): self.response.headers.setdefault('Server', ServerHeader) def check_host_header_exists(self): if self.message.protocol >= (1, 1) and 'Host' not in self.message.headers: raise BAD_REQUEST('Missing Host header') def set_request_uri_host(self): if 
'Host' not in self.message.headers: return host = self.message.headers.element('Host') self.message.uri.host = host.host self.message.uri.port = host.port def check_message_without_body_containing_data(self): if self.buffer and 'Content-Length' not in self.message.headers and not self.chunked: # request without Content-Length header but body raise LENGTH_REQUIRED(u'Missing Content-Length header.') def check_methods_without_body(self): if self.message.method in (u'HEAD', u'GET', u'TRACE') and self.message.body: raise BAD_REQUEST('A %s request is considered as safe and MUST NOT contain a request body.' % self.message.method) def check_http2_upgrade(self): def is_http2_upgrade(): connection = self.message.headers.values('Connection') yield 'Upgrade' in connection yield 'HTTP2-Settings' in connection yield 'Upgrade' in self.message.headers yield self.message.headers.element('Upgrade') == 'h2c' yield 'HTTP2-Settings' in self.message.headers yield self.message.headers.element('HTTP2-Settings') if all(is_http2_upgrade()): if self.HTTP2 is None: return self.response.headers['Upgrade'] = 'h2c' self.response.headers['Connection'] = 'Upgrade' self.__class__ = self.HTTP2 raise SWITCHING_PROTOCOLS()
nilq/baby-python
python
""" Test passing exceptions to logs """ import inspect import pytest from .util import check_finished_spans, logger, tracer @pytest.mark.parametrize('stmt,exception', [ ('1 / 0', ZeroDivisionError('division by zero')), ('y = non_existent_variable', NameError("name 'non_existent_variable' is not defined")), ('import non_existent_package', ModuleNotFoundError("No module named 'non_existent_package'")), ]) def test_exception(logger, tracer, stmt, exception): operation_name = 'span_exception' log = { 'event': 'error', 'message': 'Who would cross the Bridge of Death must answer me these questions three, ' 'ere the other side he see.', 'error.object': exception, 'error.kind': exception.__class__, 'stack': f' File "{__file__}", line ' + '{lineno}, in {func}\n exec(stmt)\n File "<string>", ' 'line 1, in <module>\n', } with tracer.start_active_span(operation_name): try: lineno = inspect.currentframe().f_lineno + 1 exec(stmt) except exception.__class__: func = inspect.currentframe().f_code.co_name log['stack'] = log['stack'].format(lineno=lineno, func=func) logger.exception(log['message']) check_finished_spans(tracer=tracer, operation_names_expected=[operation_name], logs_expected={operation_name: [log]})
nilq/baby-python
python
from fastapi import HTTPException from datetime import datetime from .router import Router import models from secrets import token_hex class SessionsRouter(Router): def __init__(self, config, database): super().__init__('/sessions', config, database) def methods(self): @self.router.get('/all') async def get_sessions(key: models.ApiKey): api_key_check = self.check_api_key(key.key, 'super') if api_key_check is not True: raise api_key_check with self.database as cursor: cursor.execute("SELECT id, time, user, token FROM sessions") sessions = [{'id': id, 'time': time, 'user': user, 'token': token} for id, time, user, token in cursor.fetchall()] return sessions @self.router.get('/') async def get_session(key: models.ApiKey, session: models.Session): api_key_check = self.check_api_key(key.key, 'super') if api_key_check is not True: raise api_key_check with self.database as cursor: cursor.execute("SELECT id, user FROM sessions WHERE token=?", (session.token,)) session = cursor.fetchone() if not session: raise HTTPException(404, 'Session does not exist') id, user_id = session cursor.execute("SELECT permission FROM users WHERE id=?", (user_id,)) user = cursor.fetchone() if not user: raise HTTPException(404, 'User does not exist') permission, = user session = {'id': id, 'user': user_id, 'permission': permission} return session @self.router.put('/') async def put_session(key: models.ApiKey, session: models.NewSession): api_key_check = self.check_api_key(key.key, 'super') if api_key_check is not True: raise api_key_check with self.database as cursor: cursor.execute("SELECT id, permission FROM users WHERE nick=? AND password=?", (session.username, session.password)) user = cursor.fetchone() if not user: raise HTTPException(404, "User does not exist") user_id, permission = user cursor.execute("DELETE FROM sessions WHERE user=?", (user_id,)) time = round(datetime.now().timestamp()) token = token_hex(self.config['TOKEN_SECURITY'][permission]) cursor.execute("INSERT INTO sessions (time, user, token) VALUES (?, ?, ?)", (time, user_id, token)) cursor.execute("SELECT id, time, user, token FROM sessions WHERE user=?", (user_id,)) session = cursor.fetchone() return {'id': session[0], 'time': session[1], 'user': session[2], 'token': session[3]}
nilq/baby-python
python
# Copyright (c) 2020 elParaguayo # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from typing import Any, List, Tuple from libqtile import bar, hook from libqtile.widget import base class WindowCount(base._TextBox): """A simple widget to show the number of windows in the current group.""" orientations = base.ORIENTATION_HORIZONTAL defaults = [ ("font", "sans", "Text font"), ("fontsize", None, "Font pixel size. Calculated if None."), ("fontshadow", None, "font shadow color, default is None(no shadow)"), ("padding", None, "Padding left and right. Calculated if None."), ("foreground", "#ffffff", "Foreground colour."), ("text_format", "{num}", "Format for message"), ("show_zero", False, "Show window count when no windows") ] # type: List[Tuple[str, Any, str]] def __init__(self, text=" ", width=bar.CALCULATED, **config): base._TextBox.__init__(self, text=text, width=width, **config) self.add_defaults(WindowCount.defaults) self._count = 0 def _configure(self, qtile, bar): base._TextBox._configure(self, qtile, bar) self._setup_hooks() self._wincount() def _setup_hooks(self): hook.subscribe.client_killed(self._win_killed) hook.subscribe.client_managed(self._wincount) hook.subscribe.current_screen_change(self._wincount) hook.subscribe.setgroup(self._wincount) def _wincount(self, *args): try: self._count = len(self.qtile.current_group.windows) except AttributeError: self._count = 0 self.update() def _win_killed(self, window): try: self._count = len(self.qtile.current_group.windows) except AttributeError: self._count = 0 if self._count and getattr(window, "group", None): self._count -= 1 self.update() def calculate_length(self): if self.text and (self._count or self.show_zero): return min( self.layout.width, self.bar.width ) + self.actual_padding * 2 else: return 0 def update(self): self.text = self.text_format.format(num=self._count) self.bar.draw() def cmd_get(self): """Retrieve the current text.""" return self.text
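# A minimal sketch of placing this widget in a qtile bar -- a hypothetical
# snippet from a user's config.py, not part of this module:
#
#   from libqtile import bar
#   from libqtile.config import Screen
#   from libqtile.widget import WindowCount
#
#   screens = [
#       Screen(top=bar.Bar([WindowCount(text_format="{num} win",
#                                       show_zero=True)], 24)),
#   ]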
nilq/baby-python
python
# Copyright Google Inc. All Rights Reserved. # # Use of this source code is governed by an MIT-style license that can be # found in the LICENSE file at https://angular.io/license """ Public API surface is re-exported here. This API is exported for users building angular from source in downstream projects. The rules from packages/bazel are re-exported here as well as the ng_setup_workspace repository rule needed when building angular from source downstream. Alternately, this API is available from the @angular/bazel npm package if the npm distribution of angular is used in a downstream project. """ load("//packages/bazel:index.bzl", _ng_module = "ng_module", _ng_package = "ng_package", _protractor_web_test = "protractor_web_test", _protractor_web_test_suite = "protractor_web_test_suite") load("//tools:ng_setup_workspace.bzl", _ng_setup_workspace = "ng_setup_workspace") ng_module = _ng_module ng_package = _ng_package protractor_web_test = _protractor_web_test protractor_web_test_suite = _protractor_web_test_suite ng_setup_workspace = _ng_setup_workspace
nilq/baby-python
python
# encoding: utf-8 from __future__ import absolute_import, division, print_function, unicode_literals from django.core.exceptions import ImproperlyConfigured from django.core.management.base import BaseCommand from django.template import Context, loader from haystack import connections, connection_router, constants from haystack.backends.solr_backend import SolrSearchBackend class Command(BaseCommand): help = "Generates a Solr schema that reflects the indexes." def add_arguments(self, parser): parser.add_argument( "-f", "--filename", help='If provided, directs output to a file instead of stdout.' ) parser.add_argument( "-u", "--using", default=constants.DEFAULT_ALIAS, help='If provided, chooses a connection to work with.' ) def handle(self, **options): """Generates a Solr schema that reflects the indexes.""" using = options.get('using') schema_xml = self.build_template(using=using) if options.get('filename'): self.write_file(options.get('filename'), schema_xml) else: self.print_stdout(schema_xml) def build_context(self, using): backend = connections[using].get_backend() if not isinstance(backend, SolrSearchBackend): raise ImproperlyConfigured("'%s' isn't configured as a SolrEngine)." % backend.connection_alias) content_field_name, fields = backend.build_schema( connections[using].get_unified_index().all_searchfields() ) return Context({ 'content_field_name': content_field_name, 'fields': fields, 'default_operator': constants.DEFAULT_OPERATOR, 'ID': constants.ID, 'DJANGO_CT': constants.DJANGO_CT, 'DJANGO_ID': constants.DJANGO_ID, }) def build_template(self, using): t = loader.get_template('search_configuration/solr.xml') c = self.build_context(using=using) return t.render(c) def print_stdout(self, schema_xml): self.stderr.write("\n") self.stderr.write("\n") self.stderr.write("\n") self.stderr.write("Save the following output to 'schema.xml' and place it in your Solr configuration directory.\n") self.stderr.write("--------------------------------------------------------------------------------------------\n") self.stderr.write("\n") self.stdout.write(schema_xml) def write_file(self, filename, schema_xml): with open(filename, 'w') as schema_file: schema_file.write(schema_xml)
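# Typical invocations of this management command, run from a Django project
# with haystack configured against a Solr backend:
#
#   ./manage.py build_solr_schema                   # print schema XML to stdout
#   ./manage.py build_solr_schema -f schema.xml     # write the schema to a file
#   ./manage.py build_solr_schema -u other_backend  # use a named connection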
nilq/baby-python
python
# Generated by Django 3.2.9 on 2021-12-12 10:34 import django.db.models.deletion from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("customers", "0005_auto_20211205_0953"), ] operations = [ migrations.AlterField( model_name="address", name="customer", field=models.ForeignKey( on_delete=django.db.models.deletion.PROTECT, related_name="addresses", to=settings.AUTH_USER_MODEL, verbose_name="Customer", ), ), ]
nilq/baby-python
python
#tree # ├── data # │   ├── SRR388226_1.fastq # │   ├── SRR388226_2.fastq # │   ├── SRR388227_1.fastq # │   ├── SRR388227_2.fastq # │   ├── SRR388228_1.fastq # │   ├── SRR388228_2.fastq # │   ├── SRR388229_1.fastq # │   ├── SRR388229_2.fastq # │   └── SRR.file # ├── fastqc_res # ├── RSEM_res # └── STAR_res #write for paired-end only import os import subprocess cpu="10" mainPath="/home/disk/fyh/lab_other_work/STAR_test/" fastqc="/home/disk/fyh/tools/FastQC/fastqc" trimmomatic="/home/disk/fyh/tools/Trimmomatic-0.38/trimmomatic-0.38.jar" STAR="/home/disk/fyh/tools/STAR-2.6.0a/bin/Linux_x86_64_static/STAR" RSEM="/home/disk/fyh/tools/RSEM-1.3.1/rsem-calculate-expression" fastq_phred="/home/disk/fyh/tools/scr/fastq_phred.pl" infer_experiment="/home/disk/fyh/tools/RSeQC-2.6.5/scripts/infer_experiment.py" strand_test="/home/disk/fyh/tools/scr/strand.sh" STAR_index="/home/genomewide/RNA-seq_idx/hg38/STAR" RSEM_index="/home/genomewide/RNA-seq_idx/hg38/RSEM/hg38" RefSeq="/home/genomewide/RNA-seq_idx/hg38/hg38_RefSeq.bed" fastqc_res="/home/disk/fyh/lab_other_work/STAR_test/fastqc_res" STAR_res="/home/disk/fyh/lab_other_work/STAR_test/STAR_res" RSEM_res="/home/disk/fyh/lab_other_work/STAR_test/RSEM_res" log_file="/home/disk/fyh/lab_other_work/STAR_test/quantity_log.txt" os.chdir(mainPath+"data") subprocess.Popen("ls > ../SRRfile.list",shell=True).wait() with open(mainPath+"SRRfile.list") as SRRfile: for file in SRRfile: realfile=file.rstrip() if "_1" in realfile: SRRName=realfile[:-8] read1=SRRName+"_1.fastq" read2=SRRName+"_2.fastq" subprocess.Popen(fastqc+" "+read1+" -o "+fastqc_res+" -t "+cpu,shell=True).wait() subprocess.Popen(fastqc+" "+read2+" -o "+fastqc_res+" -t "+cpu,shell=True).wait() subprocess.Popen("unzip "+fastqc_res+"/"+SRRName+"_1_fastqc.zip -d "+fastqc_res,shell=True).wait() subprocess.Popen("unzip "+fastqc_res+"/"+SRRName+"_2_fastqc.zip -d "+fastqc_res,shell=True).wait() subprocess.Popen(fastq_phred+" "+read1+"> "+mainPath+"phred.txt",shell=True).wait() subprocess.Popen('grep "Per base sequence content" '+fastqc_res+'/'+SRRName+'_1_fastqc/summary.txt | cut -f 1 > '+mainPath+'headcrop.txt',shell=True).wait() phred,headcrop="","" with open(mainPath+"phred.txt") as phredFile: phred=phredFile.readlines()[0].rstrip() with open(mainPath+"headcrop.txt") as headcropFile: headcrop=headcropFile.readlines()[0].rstrip() if headcrop=="FAIL" or headcrop=="WARN": subprocess.Popen("java -jar "+trimmomatic+" PE -phred"+phred+" "+read1+" "+read2+" "+read1+".map"+" "+read1+".unmap"+" "+read2+".map"+" "+read2+".unmap HEADCROP:12 SLIDINGWINDOW:5:20",shell=True).wait() else: subprocess.Popen("java -jar "+trimmomatic+" PE -phred"+phred+" "+read1+" "+read2+" "+read1+".map"+" "+read1+".unmap"+" "+read2+".map"+" "+read2+".unmap SLIDINGWINDOW:5:20",shell=True).wait() subprocess.Popen("mkdir "+STAR_res+"/"+SRRName,shell=True).wait() subprocess.Popen(STAR+" --runThreadN "+cpu+" --twopassMode Basic --outSAMstrandField intronMotif --genomeDir "+STAR_index+" --readFilesIn "+read1+".map "+read2+".map --outFileNamePrefix "+STAR_res+"/"+SRRName+"/ --outSAMtype BAM SortedByCoordinate --quantMode GeneCounts TranscriptomeSAM",shell=True).wait() subprocess.Popen(infer_experiment+" -i "+STAR_res+"/"+SRRName+"/Aligned.sortedByCoord.out.bam -r "+RefSeq+" > "+STAR_res+"/"+SRRName+"/strand.txt",shell=True).wait() subprocess.Popen("sh "+strand_test+" "+"../"+STAR_res+"/"+SRRName+"/strand.txt > "+mainPath+"strandInfer.txt",shell=True).wait() strand="" with open(mainPath+"strandInfer.txt") as strandFile: 
strand=strandFile.readlines()[0].rstrip() subprocess.Popen(RSEM+" -p "+cpu+" --bam --paired-end --forward-prob "+strand+" "+STAR_res+"/"+SRRName+"/Aligned.toTranscriptome.out.bam "+RSEM_index+" "+RSEM_res+"/"+SRRName,shell=True).wait() subprocess.Popen("rm -r "+SRRName+"*map "+fastqc_res+"/"+SRRName+"*.fastqc "+fastqc_res+"/"+SRRName+"*.zip "+RSEM_res+"/"+SRRName+".transcript.bam "+RSEM_res+"/"+SRRName+".stat "+STAR_res+"/"+SRRName,shell=True).wait() print("finished!") elif "_2" in realfile: continue
nilq/baby-python
python
# -*- coding: utf-8 -*-
import urllib.parse

# These escapes are the UTF-8 bytes for "维基新闻,自由的新闻源"
# ("Wikinews, the free news source"); decoding them explicitly makes
# Python 3 print the text rather than mojibake.
print(b"\xe7\xbb\xb4\xe5\x9f\xba\xe6\x96\xb0\xe9\x97\xbb\xef\xbc\x8c\xe8\x87\xaa\xe7\x94\xb1\xe7\x9a\x84\xe6\x96\xb0\xe9\x97\xbb\xe6\xba\x90".decode("utf-8"))
print(urllib.parse.unquote_plus("http%3A%2F%2Fzh.wikinews.org%2Fwiki%2FWikinews%3A%25E9%25A6%2596%25E9%25A1%25B5"))
nilq/baby-python
python
""" Copyright (c) 2017, Jairus Martin. Distributed under the terms of the MIT License. The full license is in the file LICENSE, distributed with this software. Created on Aug 3, 2017 @author: jrm """ from atom.api import Typed from enamlnative.widgets.scroll_view import ProxyScrollView from .bridge import ObjcMethod, ObjcProperty from .uikit_view import UIView, UiKitView class UIScrollView(UIView): #: Properties contentSize = ObjcProperty('CGSize') #: Added by UIScrollView+AutoResize fitToContents = ObjcMethod() # axis = ObjcProperty('UILayoutConstraintAxis') # #setProgress = ObjcMethod('float', dict(animated='bool')) # addArrangedSubview = ObjcMethod('UIView') # insertArrangedSubview = ObjcMethod('UIView', dict(atIndex='NSInteger')) # removeArrangedSubview = ObjcMethod('UIView') # # UILayoutConstraintAxisHorizontal = 0 # UILayoutConstraintAxisVertical = 1 class UiKitScrollView(UiKitView, ProxyScrollView): """ An UiKit implementation of an Enaml ProxyToolkitObject. """ #: A reference to the toolkit layout created by the proxy. widget = Typed(UIScrollView) # ------------------------------------------------------------------------- # Initialization API # ------------------------------------------------------------------------- def create_widget(self): """ Create the widget """ self.widget = UIScrollView() # def update_frame(self): # """ """ # super # # d = self.declaration # # if not (d.x or d.y or d.width or d.height): # # d.width, d.height = d.parent.width, d.parent.height # # self.frame = (d.x,d.y,d.width,d.height) def init_layout(self): super(UiKitScrollView, self).init_layout() for c in self.children(): if c.frame: self.widget.contentSize = c.frame[-2:] return self.widget.fitToContents() # ------------------------------------------------------------------------- # ProxyScrollView API # ------------------------------------------------------------------------- # def set_frame(self, change): # super(UiKitScrollView, self).set_frame(change) # d = self.declaration # self.widget.contentSize = (d.width, d.height) def set_orientation(self, orientation): #: TODO: Cannot enforce direction that I'm aware of #: (but can lock direction) pass def set_scroll_by(self, delta): raise NotImplementedError def set_scroll_to(self, point): raise NotImplementedError
nilq/baby-python
python
# --------------------------------------------------------- # Tensorflow Utils Implementation # Licensed under The MIT License [see LICENSE for details] # Written by Cheng-Bin Jin # Email: sbkim0407@gmail.com # --------------------------------------------------------- import os import logging import functools import tensorflow as tf import tensorflow.contrib.slim as slim from tensorflow.python.training import moving_averages logger = logging.getLogger(__name__) # logger logger.setLevel(logging.INFO) def _init_logger(log_path): formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s') # file handler file_handler = logging.FileHandler(os.path.join(log_path, 'model.log')) file_handler.setFormatter(formatter) file_handler.setLevel(logging.INFO) # stream handler stream_handler = logging.StreamHandler() stream_handler.setFormatter(formatter) # add handlers logger.addHandler(file_handler) logger.addHandler(stream_handler) def padding2d(x, p_h=1, p_w=1, pad_type='REFLECT', name='pad2d'): if pad_type == 'REFLECT': return tf.pad(x, [[0, 0], [p_h, p_h], [p_w, p_w], [0, 0]], 'REFLECT', name=name) def conv2d(x, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, padding='SAME', name='conv2d', is_print=True): with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, x.get_shape()[-1], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.conv2d(x, w, strides=[1, d_h, d_w, 1], padding=padding) biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) # conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) conv = tf.nn.bias_add(conv, biases) if is_print: print_activations(conv) return conv def conv3d(x, output_dim, k_h=5, k_w=5, k_d=5, d_h=2, d_w=2, d_d=2, stddev=0.02, padding='SAME', name='conv3d', is_print=True): with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, k_d, x.get_shape()[-1], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.conv3d(x, w, strides=[1, d_h, d_w, d_d, 1], padding=padding) biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) # conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) conv = tf.nn.bias_add(conv, biases) if is_print: print_activations(conv) return conv def deconv2d(x, k, k_h=3, k_w=3, d_h=2, d_w=2, stddev=0.02, padding_='SAME', output_size=None, name='deconv2d', with_w=False, is_print=True): with tf.variable_scope(name): input_shape = x.get_shape().as_list() # calculate output size h_output, w_output = None, None if not output_size: h_output, w_output = input_shape[1] * 2, input_shape[2] * 2 # output_shape = [input_shape[0], h_output, w_output, k] # error when not define batch_size output_shape = [tf.shape(x)[0], h_output, w_output, k] # conv2d transpose w = tf.get_variable('w', [k_h, k_w, k, input_shape[3]], initializer=tf.random_normal_initializer(stddev=stddev)) deconv = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=[1, d_h, d_w, 1], padding=padding_) biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) deconv = tf.nn.bias_add(deconv, biases) if is_print: print_activations(deconv) if with_w: return deconv, w, biases else: return deconv def upsampling2d(x, size=(2, 2), name='upsampling2d'): with tf.name_scope(name): shape = x.get_shape().as_list() return tf.image.resize_nearest_neighbor(x, size=(size[0] * shape[1], size[1] * shape[2])) def linear(x, output_size, bias_start=0.0, with_w=False, name='fc'): shape = 
x.get_shape().as_list() with tf.variable_scope(name): matrix = tf.get_variable(name="matrix", shape=[shape[1], output_size], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer()) bias = tf.get_variable(name="bias", shape=[output_size], initializer=tf.constant_initializer(bias_start)) if with_w: return tf.matmul(x, matrix) + bias, matrix, bias else: return tf.matmul(x, matrix) + bias def norm(x, name, _type, _ops, is_train=True): if _type == 'batch': return batch_norm(x, name=name, _ops=_ops, is_train=is_train) elif _type == 'instance': return instance_norm(x, name=name) elif _type == 'layer': return layer_norm(x, name=name) else: raise NotImplementedError def batch_norm(x, name, _ops, is_train=True): """Batch normalization.""" with tf.variable_scope(name): params_shape = [x.get_shape()[-1]] beta = tf.get_variable('beta', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32)) gamma = tf.get_variable('gamma', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32)) if is_train is True: mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments') moving_mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False) moving_variance = tf.get_variable('moving_variance', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32), trainable=False) _ops.append(moving_averages.assign_moving_average(moving_mean, mean, 0.9)) _ops.append(moving_averages.assign_moving_average(moving_variance, variance, 0.9)) else: mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False) variance = tf.get_variable('moving_variance', params_shape, tf.float32, trainable=False) # epsilon used to be 1e-5. Maybe 0.001 solves NaN problem in deeper net. y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 1e-5) y.set_shape(x.get_shape()) return y def instance_norm(x, name='instance_norm', mean=1.0, stddev=0.02, epsilon=1e-5): with tf.variable_scope(name): depth = x.get_shape()[3] scale = tf.get_variable( 'scale', [depth], tf.float32, initializer=tf.random_normal_initializer(mean=mean, stddev=stddev, dtype=tf.float32)) offset = tf.get_variable('offset', [depth], initializer=tf.constant_initializer(0.0)) # calcualte mean and variance as instance mean, variance = tf.nn.moments(x, axes=[1, 2], keep_dims=True) # normalization inv = tf.rsqrt(variance + epsilon) normalized = (x - mean) * inv return scale * normalized + offset # TODO: I'm not sure is it a good implementation of layer normalization... def layer_norm(x, name='layer_norm'): with tf.variable_scope(name): norm_axes = [1, 2, 3] mean, var = tf.nn.moments(x, axes=norm_axes, keep_dims=True) # Assume the 'neurons' axis is the third of norm_axes. This is the case for fully-connected # and BHWC conv layers. n_neurons = x.get_shape().as_list()[norm_axes[2]] offset = tf.get_variable('offset', n_neurons, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32)) scale = tf.get_variable('scale', n_neurons, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32)) # Add broadcasting dims to offset and scale (e.g. 
BCHW conv data) offset = tf.reshape(offset, [1 for _ in range(len(norm_axes)-1)] + [-1]) scale = tf.reshape(scale, [1 for _ in range(len(norm_axes)-1)] + [-1]) result = tf.nn.batch_normalization(x, mean, var, offset, scale, 1e-5) return result def n_res_blocks(x, _ops=None, norm_='instance', is_train=True, num_blocks=6, is_print=False): output = None for idx in range(1, num_blocks+1): output = res_block(x, x.get_shape()[3], _ops=_ops, norm_=norm_, is_train=is_train, name='res{}'.format(idx)) x = output if is_print: print_activations(output) return output # norm(x, name, _type, _ops, is_train=True) def res_block(x, k, _ops=None, norm_='instance', is_train=True, pad_type=None, name=None): with tf.variable_scope(name): conv1, conv2 = None, None # 3x3 Conv-Batch-Relu S1 with tf.variable_scope('layer1'): if pad_type is None: conv1 = conv2d(x, k, k_h=3, k_w=3, d_h=1, d_w=1, padding='SAME', name='conv') elif pad_type == 'REFLECT': padded1 = padding2d(x, p_h=1, p_w=1, pad_type='REFLECT', name='padding') conv1 = conv2d(padded1, k, k_h=3, k_w=3, d_h=1, d_w=1, padding='VALID', name='conv') normalized1 = norm(conv1, name='norm', _type=norm_, _ops=_ops, is_train=is_train) relu1 = tf.nn.relu(normalized1) # 3x3 Conv-Batch S1 with tf.variable_scope('layer2'): if pad_type is None: conv2 = conv2d(relu1, k, k_h=3, k_w=3, d_h=1, d_w=1, padding='SAME', name='conv') elif pad_type == 'REFLECT': padded2 = padding2d(relu1, p_h=1, p_w=1, pad_type='REFLECT', name='padding') conv2 = conv2d(padded2, k, k_h=3, k_w=3, d_h=1, d_w=1, padding='VALID', name='conv') normalized2 = norm(conv2, name='norm', _type=norm_, _ops=_ops, is_train=is_train) # sum layer1 and layer2 output = x + normalized2 return output def identity(x, name='identity', is_print=False): output = tf.identity(x, name=name) if is_print: print_activations(output) return output def avgPoolConv(x, output_dim, filter_size=3, stride=1, name='avgPoolConv', is_print=True): with tf.variable_scope(name): output = avg_pool_2x2(x) output = conv2d(output, output_dim=output_dim, k_h=filter_size, k_w=filter_size, d_h=stride, d_w=stride) if is_print: print_activations(output) return output def convAvgPool(x, output_dim, filter_size=3, stride=1, name='convAvgPool', is_print=True): with tf.variable_scope(name): output = conv2d(x, output_dim=output_dim, k_h=filter_size, k_w=filter_size, d_h=stride, d_w=stride) output = avg_pool_2x2(output) if is_print: print_activations(output) return output def max_pool_2x2(x, name='max_pool'): with tf.name_scope(name): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def avg_pool_2x2(x, name='avg_pool'): with tf.name_scope(name): return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def sigmoid(x, name='sigmoid', is_print=False): output = tf.nn.sigmoid(x, name=name) if is_print: print_activations(output) return output def tanh(x, name='tanh', is_print=False): output = tf.nn.tanh(x, name=name) if is_print: print_activations(output) return output def relu(x, name='relu', is_print=False): output = tf.nn.relu(x, name=name) if is_print: print_activations(output) return output def lrelu(x, leak=0.2, name='lrelu', is_print=False): output = tf.maximum(x, leak*x, name=name) if is_print: print_activations(output) return output def xavier_init(in_dim): # print('in_dim: ', in_dim) xavier_stddev = 1. / tf.sqrt(in_dim / 2.) 
return xavier_stddev def print_activations(t): # print(t.op.name, ' ', t.get_shape().as_list()) logger.info(t.op.name + '{}'.format(t.get_shape().as_list())) def show_all_variables(): model_vars = tf.trainable_variables() slim.model_analyzer.analyze_vars(model_vars, print_info=True) def batch_convert2int(images): # images: 4D float tensor (batch_size, image_size, image_size, depth) return tf.map_fn(convert2int, images, dtype=tf.uint8) def convert2int(image): # transform from float tensor ([-1.,1.]) to int image ([0,255]) return tf.image.convert_image_dtype((image + 1.0) / 2.0, tf.uint8) def res_block_v2(x, k, filter_size, _ops=None, norm_='instance', is_train=True, resample=None, name=None): with tf.variable_scope(name): if resample == 'down': conv_shortcut = functools.partial(avgPoolConv, output_dim=k, filter_size=1) conv_1 = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) conv_2 = functools.partial(convAvgPool, output_dim=k) elif resample == 'up': conv_shortcut = functools.partial(deconv2d, k=k) conv_1 = functools.partial(deconv2d, k=k, k_h=filter_size, k_w=filter_size) conv_2 = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) elif resample is None: conv_shortcut = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) conv_1 = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) conv_2 = functools.partial(conv2d, output_dim=k, k_h=filter_size, k_w=filter_size, d_h=1, d_w=1) else: raise Exception('invalid resample value') if (k == x.get_shape().as_list()[3]) and (resample is None): shortcut = x # Identity skip-connection else: shortcut = conv_shortcut(x, name='shortcut') output = x output = norm(output, _type=norm_, _ops=_ops, is_train=is_train, name='norm1') output = relu(output, name='relu1') output = conv_1(output, name='conv1') output = norm(output, _type=norm_, _ops=_ops, is_train=is_train, name='norm2') output = relu(output, name='relu2') output = conv_2(output, name='conv2') return shortcut + output
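

if __name__ == '__main__':
    # Minimal smoke test (my addition, not from the original repo): run a
    # random tensor through instance_norm and lrelu, assuming `tf` is the
    # TF1.x module imported at the top of this file (imports not shown in
    # this fragment).
    with tf.Graph().as_default():
        demo_x = tf.random_normal([2, 8, 8, 4])
        demo_y = lrelu(instance_norm(demo_x, name='instance_norm_demo'))
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            print(sess.run(demo_y).shape)  # (2, 8, 8, 4)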
nilq/baby-python
python
import pathlib from django.utils.safestring import mark_safe CSS_PATH = (pathlib.Path(__file__).resolve().parent / 'static' / 'frontend' / 'built' / 'style' / 'email.min.css') def get(): return mark_safe(CSS_PATH.read_text(encoding='utf-8')) # nosec
nilq/baby-python
python
import os

print("If you want to use the service, install:")
print(": django")
print(": vsftpd")
print("[Y] or [N]")
Select_usr = input("Do you want to install django and vsftpd?: ")
if Select_usr == 'Y':
    os.system("pip3 install django")
    os.system("sudo apt install vsftpd")
    print("[*] Services have been installed")
    print("Starting FTP and DJANGO services")
    os.system("python3 start.py")
if Select_usr == 'N':
    exit()
nilq/baby-python
python
""" The proper way to create an uncertain array is by calling :func:`.uarray` """ # Adding numpy arrays to GTC is not an easy exercise. # Our need is to provide convenient containers for uncertain numbers. # We do not try to integrate uncertain numbers in numpy's design. from __future__ import division import warnings from numbers import Number, Real, Complex from math import isnan, isinf from cmath import isnan as cisnan from cmath import isinf as cisinf try: from itertools import izip # Python 2 except ImportError: izip = zip xrange = range import numpy as np from GTC import is_sequence from GTC.linear_algebra import matmul from GTC.core import ( value, uncertainty, variance, dof, cos, sin, tan, acos, asin, atan, atan2, exp, log, log10, sqrt, sinh, cosh, tanh, acosh, asinh, atanh, mag_squared, magnitude, phase, result, ) from GTC.lib import ( UncertainReal, UncertainComplex ) def _isnan(number): val = value(number) if isinstance(val, Real): return isnan(val) elif isinstance(val, Complex): return cisnan(val) else: raise TypeError('cannot calculate isnan of type {}'.format(type(number))) def _isinf(number): val = value(number) if isinstance(val, Real): return isinf(val) elif isinstance(val, Complex): return cisinf(val) else: raise TypeError('cannot calculate isinf of type {}'.format(type(number))) # Note numpy defines its own numeric types, instead of bool, int, # float, complex, that have additional attributes. These types are needed by # functions like `numpy.average`. (Uses `dtype` and `.size` attributes # on the result returned by `mean`, as defined in a subclass if available.) # One way to fix this is to add the required attributes # to all the return values from `UncertainArray` methods. # Another option is to ensure that array elements # are always numpy-compatible and to ensure that all # uncertain number objects are initialised with # a.dtype = np.dtype('O') # a.size = 1 # a.shape = () # # Our use of `dtype=object` for arrays means that numeric # elements are not cast to numpy types when loaded into an array. # To fix this would require iteration through all arrays as they # are being created! #-------------------------------------------------------------------- class UncertainArray(np.ndarray): """An :class:`UncertainArray` can contain elements of type :class:`int`, :class:`float`, :class:`complex`, :class:`.UncertainReal` or :class:`.UncertainComplex`. Do not instantiate this class directly. Use :func:`~.uarray` instead. Base: :class:`numpy.ndarray` .. versionadded:: 1.1 """ def __new__(cls, array, dtype=None, label=None): # The first case allows users to create uarray instances # with a definite numpy number type. This could be done # by wrapping a call to uarray() around an ndarray. # Without this, the type gets converted back to Python. if isinstance(array, np.ndarray): dtype = array.dtype elif dtype is None: dtype = np.dtype('O') obj = np.asarray(array, dtype=dtype).view(cls) obj._label = label return obj def __array_finalize__(self, obj): if obj is None: return self._label = getattr(obj, 'label', None) # numpy looks at type().__name__ when preparing # a string representation of the object. This # change means we see `uarray` not `UncertainArray`. 
self.__class__.__name__ = 'uarray' self._broadcasted_shape = None def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): try: attr = getattr(self, '_' + ufunc.__name__) except AttributeError: # Want to raise a NotImplementedError without nested exceptions # In Python 3 this could be achieved by "raise Exception('...') from None" attr = None if attr is None: raise NotImplementedError( 'The {} function has not been implemented'.format(ufunc) ) if kwargs: warnings.warn('**kwargs, {}, are currently not supported' .format(kwargs), stacklevel=2) case = len(inputs) if case == 1: pass # Must be an UncertainArray elif case == 2: # At least 1 of the inputs must be an UncertainArray # If an input is not an ndarray then convert it to be an ndarray not0 = not isinstance(inputs[0], np.ndarray) if not0 or not isinstance(inputs[1], np.ndarray): # A tuple cannot be modified # This does not create a copy of the items inputs = list(inputs) # convert the input that is not an ndarray convert, keep = (0, 1) if not0 else (1, 0) if isinstance(inputs[convert], (Number, UncertainReal, UncertainComplex)): inputs[convert] = np.full(inputs[keep].shape, inputs[convert], dtype=object) else: inputs[convert] = np.asarray(inputs[convert], dtype=object) self._broadcasted_shape = None if inputs[0].shape != inputs[1].shape: broadcasted = np.broadcast(*inputs) inputs = broadcasted.iters self._broadcasted_shape = broadcasted.shape else: assert False, 'Should not occur: __array_ufunc__ received {} inputs'.format(case) return attr(*inputs) def __repr__(self): # Use the numpy formatting but hide the default dtype np_array_repr = np.array_repr(self) if self.dtype == object: # Truncate string from trailing ',' i = np_array_repr.rfind(',') return np_array_repr[:i] + ')' else: return np_array_repr def __matmul__(self, other): # Implements the protocol used by the '@' operator defined in PEP 465. return matmul(self, other) def __rmatmul__(self, other): # Implements the protocol used by the '@' operator defined in PEP 465. return matmul(other, self) def _matmul(self, *inputs): # np.matmul became a ufunc in version 1.16.0 return matmul(*inputs) def _create_empty(self, inputs=None, dtype=None, order='C'): if dtype is None: dtype = object shape = self.shape if self._broadcasted_shape is None else self._broadcasted_shape a = np.empty(shape, dtype=dtype, order=order) if inputs is None: return a, a.itemset, self.flat if len(inputs) == 1: return a, a.itemset, inputs[0].flat if isinstance(inputs[0], np.ndarray): return a, a.itemset, izip(inputs[0].flat, inputs[1].flat) # then the inputs are already broadcasted iterators return a, a.itemset, izip(*inputs) @property def label(self): """The label that was assigned to the array when it was created. **Example**:: >>> current = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)], label='amps') >>> current.label 'amps' :rtype: :class:`str` """ return self._label @property def real(self): """The result of applying the attribute ``real`` to each element in the array. **Example**:: >>> a = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> a.real uarray([ureal(1.2,0.6,inf), ureal(3.2,1.4,inf), ureal(0.0,0.9,inf)]) :rtype: :class:`UncertainArray` """ arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, item.real) return UncertainArray(arr) @property def imag(self): """The result of applying the attribute ``imag`` to each element in the array. 
**Example**:: >>> a = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> a.imag uarray([ureal(-0.5,0.6,inf), ureal(1.2,0.2,inf), ureal(1.5,0.9,inf)]) :rtype: :class:`UncertainArray` """ arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, item.imag) return UncertainArray(arr) @property def r(self): """The result of applying the attribute ``r`` to each element in the array. **Example**:: >>> a = la.uarray([ucomplex(1.2-0.5j, (1.2, 0.7, 0.7, 2.2)), ... ucomplex(-0.2+1.2j, (0.9, 0.4, 0.4, 1.5))]) >>> a.r uarray([0.43082021842766455, 0.34426518632954817]) :rtype: :class:`UncertainArray` """ arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, item.r) return UncertainArray(arr) @property def x(self): """The result of :func:`~.core.value` for each element in the array. **Example**:: >>> a = la.uarray([0.57, ureal(0.45, 0.12), ucomplex(1.1+0.68j, 0.19)]) >>> a.x uarray([0.57, 0.45, (1.1+0.68j)]) :rtype: :class:`UncertainArray` """ return self.value() def value(self): """The result of :func:`~.core.value` for each element in the array. **Example**:: >>> a = la.uarray([0.57, ureal(0.45, 0.12), ucomplex(1.1+0.68j, 0.19)]) >>> a.value() uarray([0.57, 0.45, (1.1+0.68j)]) :rtype: :class:`UncertainArray` """ # Note: in the future we might allow different `dtype` values. # However, this needs some thought. Should `dtype=float` # return complex numbers as a pair of reals, for example? # What are the most likely use-cases? # :param dtype: The data type of the returned array. # :type dtype: :class:`numpy.dtype` arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, value(item)) return UncertainArray(arr) @property def u(self): """The result of :func:`~.core.uncertainty` for each element in the array. **Example**:: >>> r = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)]) >>> r.u uarray([0.18, 0.12, 0.19]) >>> c = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> c.u uarray([StandardUncertainty(real=0.6, imag=0.6), StandardUncertainty(real=1.4, imag=0.2), StandardUncertainty(real=0.9, imag=0.9)]) :rtype: :class:`UncertainArray` """ return self.uncertainty() def uncertainty(self): """The result of :func:`~.core.uncertainty` for each element in the array. **Example**:: >>> r = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)]) >>> r.uncertainty() uarray([0.18, 0.12, 0.19]) >>> c = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> c.uncertainty() uarray([StandardUncertainty(real=0.6, imag=0.6), StandardUncertainty(real=1.4, imag=0.2), StandardUncertainty(real=0.9, imag=0.9)]) :rtype: :class:`UncertainArray` """ # Note: in the future we might allow different `dtype` values. # However, we need to consider the use-cases carefully. # :param dtype: The data type of the returned array. # :type dtype: :class:`numpy.dtype` arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, uncertainty(item)) return UncertainArray(arr) @property def v(self): """The result of :func:`~.core.variance` for each element in the array. 
**Example**:: >>> r = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)]) >>> r.v uarray([0.0324, 0.0144, 0.0361]) >>> c = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.5, 0.5)), ucomplex(1.5j, 0.9)]) >>> c.v uarray([VarianceCovariance(rr=0.36, ri=0.0, ir=0.0, ii=0.36), VarianceCovariance(rr=2.25, ri=0.0, ir=0.0, ii=0.25), VarianceCovariance(rr=0.81, ri=0.0, ir=0.0, ii=0.81)]) :rtype: :class:`UncertainArray` """ return self.variance() def variance(self): """The result of :func:`~.core.variance` for each element in the array. **Example**:: >>> r = la.uarray([ureal(0.57, 0.18), ureal(0.45, 0.12), ureal(0.68, 0.19)]) >>> r.variance() uarray([0.0324, 0.0144, 0.0361]) >>> c = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.5, 0.5)), ucomplex(1.5j, 0.9)]) >>> c.variance() uarray([VarianceCovariance(rr=0.36, ri=0.0, ir=0.0, ii=0.36), VarianceCovariance(rr=2.25, ri=0.0, ir=0.0, ii=0.25), VarianceCovariance(rr=0.81, ri=0.0, ir=0.0, ii=0.81)]) :rtype: :class:`UncertainArray` """ # Note: in the future we might allow different `dtype` values. # However, we need to consider the use-cases carefully. # :param dtype: The data type of the returned array. # :type dtype: :class:`numpy.dtype` arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, variance(item)) return UncertainArray(arr) @property def df(self): """The result of :func:`~.core.dof` for each element in the array. **Example**:: >>> a = la.uarray([ureal(6, 2, df=3), ureal(4, 1, df=4), ureal(5, 3, df=7), ureal(1, 1)]) >>> a.df uarray([3.0, 4.0, 7.0, inf]) :rtype: :class:`UncertainArray` """ return self.dof() def dof(self): """The result of :func:`~.core.dof` for each element in the array. **Example**:: >>> a = la.uarray([ureal(6, 2, df=3), ureal(4, 1, df=4), ureal(5, 3, df=7), ureal(1, 1)]) >>> a.dof() uarray([3.0, 4.0, 7.0, inf]) :rtype: :class:`UncertainArray` """ arr, itemset, iterator = self._create_empty(dtype=None) for i, item in enumerate(iterator): itemset(i, dof(item)) return UncertainArray(arr) def sensitivity(self, x): """The result of :func:`~.reporting.sensitivity` for each element in the array. :rtype: :class:`UncertainArray` """ # Note, there is a case for introducing `dtype` or some other parameter. # The return types for complex cases may be multivariate. # `_create_empty()` handles only ndarray-like sequences if not isinstance(x, np.ndarray): x = np.asarray(x) arr, itemset, iterator = self._create_empty((self, x)) for i, (y, x) in enumerate(iterator): itemset(i, y.sensitivity(x)) return UncertainArray(arr) def u_component(self, x): """The result of :func:`~.reporting.u_component` for each element in the array. :rtype: :class:`UncertainArray` """ # Note, there is a case for introducing `dtype` or some other parameter. # The return types for complex cases may be multivariate. # `_create_empty()` handles only ndarray-like sequences if not isinstance(x, np.ndarray): x = np.asarray(x) arr, itemset, iterator = self._create_empty((self, x)) for i, (y, x) in enumerate(iterator): itemset(i, y.u_component(x)) return UncertainArray(arr) def conjugate(self): """The result of applying the attribute ``conjugate`` to each element in the array. 
**Example**:: >>> a = la.uarray([ucomplex(1.2-0.5j, 0.6), ucomplex(3.2+1.2j, (1.4, 0.2)), ucomplex(1.5j, 0.9)]) >>> a.conjugate() uarray([ucomplex((1.2+0.5j), u=[0.6,0.6], r=0.0, df=inf), ucomplex((3.2-1.2j), u=[1.4,0.2], r=0.0, df=inf), ucomplex((0-1.5j), u=[0.9,0.9], r=0.0, df=inf)]) :rtype: :class:`UncertainArray` """ # override this method because I wanted to create a custom __doc__ return self._conjugate() def _conjugate(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, item.conjugate()) return UncertainArray(arr) def _positive(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, +item) return UncertainArray(arr) def _negative(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, -item) return UncertainArray(arr) def _add(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a + b) return UncertainArray(arr) def _subtract(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a - b) return UncertainArray(arr) def _multiply(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a * b) return UncertainArray(arr) def _divide(self, *inputs): return self._true_divide(*inputs) def _true_divide(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a / b) return UncertainArray(arr) def _power(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): itemset(i, a ** b) return UncertainArray(arr) def _exp(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, exp(item)) return UncertainArray(arr) def _log(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, log(item)) return UncertainArray(arr) def _log10(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, log10(item)) return UncertainArray(arr) def _sqrt(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, sqrt(item)) return UncertainArray(arr) def _cos(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, cos(item)) return UncertainArray(arr) def _sin(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, sin(item)) return UncertainArray(arr) def _tan(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, tan(item)) return UncertainArray(arr) def _arccos(self, *ignore): return self._acos() def _acos(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, acos(item)) return UncertainArray(arr) def _arcsin(self, *ignore): return self._asin() def _asin(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, asin(item)) return UncertainArray(arr) def _arctan(self, *ignore): return self._atan() def _atan(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, atan(item)) return UncertainArray(arr) def _arctan2(self, *inputs): return self._atan2(inputs[1]) def _atan2(self, 
*inputs): arr, itemset, iterator = self._create_empty((self, inputs[0])) for i, (a, b) in enumerate(iterator): itemset(i, atan2(a, b)) return UncertainArray(arr) def _sinh(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, sinh(item)) return UncertainArray(arr) def _cosh(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, cosh(item)) return UncertainArray(arr) def _tanh(self, *ignore): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, tanh(item)) return UncertainArray(arr) def _arccosh(self, *ignore): return self._acosh() def _acosh(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, acosh(item)) return UncertainArray(arr) def _arcsinh(self, *ignore): return self._asinh() def _asinh(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, asinh(item)) return UncertainArray(arr) def _arctanh(self, *ignore): return self._atanh() def _atanh(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, atanh(item)) return UncertainArray(arr) def _square(self, *ignore): return self._mag_squared() def _mag_squared(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, mag_squared(item)) return UncertainArray(arr) def _magnitude(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, magnitude(item)) return UncertainArray(arr) def _phase(self): arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): itemset(i, phase(item)) return UncertainArray(arr) def _intermediate(self, labels): # Default second argument of calling function is `None` if labels is None: arr, itemset, iterator = self._create_empty() for i, x in enumerate(iterator): itemset(i, result(x)) else: # `_create_empty()` handles only ndarray-like sequences if not is_sequence(labels): # Add index notation to the label base labels = [ "{}[{}]".format(labels, i) for i in xrange(self.size) ] labels = np.asarray(labels) arr, itemset, iterator = self._create_empty((self, labels)) for i, (x, lbl) in enumerate(iterator): itemset(i, result(x, lbl)) return UncertainArray(arr) def _equal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a == b) return arr def _not_equal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a != b) return arr def _less(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a < b) return arr def _less_equal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a <= b) return arr def _greater(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a > b) return arr def _greater_equal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, (a, b) in enumerate(iterator): itemset(i, a >= b) return arr def _maximum(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): if _isnan(a): itemset(i, a) elif _isnan(b): itemset(i, b) elif a > b: itemset(i, a) else: itemset(i, b) return 
UncertainArray(arr) def _minimum(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, (a, b) in enumerate(iterator): if _isnan(a): itemset(i, a) elif _isnan(b): itemset(i, b) elif a < b: itemset(i, a) else: itemset(i, b) return UncertainArray(arr) def _logical_and(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=object) for i, (a, b) in enumerate(iterator): itemset(i, a and b) return UncertainArray(arr) def _logical_or(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=object) for i, (a, b) in enumerate(iterator): itemset(i, a or b) return UncertainArray(arr) def _logical_xor(self, *inputs): raise TypeError( "Boolean bitwise operations are not defined for `UncertainArray`" ) # arr, itemset, iterator = self._create_empty(inputs, dtype=bool) # for i, (a, b) in enumerate(iterator): # itemset(i, bool(a) ^ bool(b)) # return arr def _logical_not(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, item in enumerate(iterator): itemset(i, not bool(item)) return arr def _isinf(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, item in enumerate(iterator): itemset(i, _isinf(item)) return arr def _isnan(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, item in enumerate(iterator): itemset(i, _isnan(item)) return arr def _isfinite(self, *inputs): arr, itemset, iterator = self._create_empty(inputs, dtype=bool) for i, item in enumerate(iterator): itemset(i, not (_isnan(item) or _isinf(item))) return arr def _reciprocal(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, item in enumerate(iterator): itemset(i, 1.0/item) return UncertainArray(arr) def _absolute(self, *inputs): arr, itemset, iterator = self._create_empty(inputs) for i, item in enumerate(iterator): itemset(i, abs(item)) return UncertainArray(arr) def copy(self, order='C'): arr, itemset, iterator = self._create_empty(order=order) for i, item in enumerate(iterator): itemset(i, +item) return UncertainArray(arr, label=self.label) def round(self, decimals=0, **kwargs): digits = kwargs.get('digits', decimals) df_decimals = kwargs.get('df_decimals', digits) arr, itemset, iterator = self._create_empty() for i, item in enumerate(iterator): try: itemset(i, item._round(digits, df_decimals)) except AttributeError: try: itemset(i, round(item, digits)) except TypeError: itemset(i, complex(round(item.real, digits), round(item.imag, digits))) return UncertainArray(arr) def sum(self, *args, **kwargs): raise TypeError( "`sum` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).sum(*args, **kwargs)) def mean(self, *args, **kwargs): raise TypeError( "`mean` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).mean(*args, **kwargs)) def std(self, *args, **kwargs): # If this is to be implemented we need to be clear about # what is calculated. This will not be an uncertain-number # calculation, it will take the values of a sample of uncertain # numbers and evaluate the SD. This will probably be clearer # if the function is in the `type_a` module. # Note we would also want a similar function to calculate # the standard error (ie the type-A uncertainty). raise TypeError( "`std` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).std(*args, **kwargs)) def var(self, *args, **kwargs): # If this is to be implemented we need to be clear about # what is calculated. 
This will not be an uncertain-number # calculation, it will take the values of a sample of uncertain # numbers and evaluate the SD. This will probably be clearer # if the function is in the `type_a` module. # Note we would also want a similar function to calculate # the standard variance (ie the type-A uncertainty squared). raise TypeError( "`var` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).var(*args, **kwargs)) def max(self, *args, **kwargs): raise TypeError( "`max` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).max(*args, **kwargs)) def min(self, *args, **kwargs): raise TypeError( "`min` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).min(*args, **kwargs)) def trace(self, *args, **kwargs): raise TypeError( "`trace` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).trace(*args, **kwargs)) def cumprod(self, *args, **kwargs): # numpy catches ``TypeError`` and uses its # internal implementation of this method raise RuntimeError( "`cumprod` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).cumprod(*args, **kwargs)) def cumsum(self, *args, **kwargs): # numpy catches ``TypeError`` and uses its # internal implementation of this method raise RuntimeError( "`cumsum` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).cumsum(*args, **kwargs)) def prod(self, *args, **kwargs): raise TypeError( "`prod` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).prod(*args, **kwargs)) def ptp(self, *args, **kwargs): raise TypeError( "`ptp` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self).ptp(*args, **kwargs)) def any(self, *args, **kwargs): raise TypeError( "`any` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self, dtype=bool).any(*args, **kwargs)) def all(self, *args, **kwargs): raise TypeError( "`all` is not defined for `UncertainArray`" ) # return UncertainArray(np.asarray(self, dtype=bool).all(*args, **kwargs)) # Allows pickle to understand the class name 'uarray' uarray = UncertainArray
nilq/baby-python
python
'''
The default translation file removes all the attributes with empty values
'''


def filterTags(attrs):
    if not attrs:
        return
    tags = {}
    # dict.items() works on both Python 2 and 3; the original used the
    # Python-2-only iteritems().
    for k, v in attrs.items():
        if v:
            tags.update({k: v})
    return tags
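

if __name__ == '__main__':
    # Quick demonstration (my addition, not part of the translation-file
    # contract): empty and None values are dropped, and a falsy input
    # passes straight through.
    print(filterTags({'name': 'Main St', 'ref': '', 'oneway': None}))  # {'name': 'Main St'}
    print(filterTags(None))  # None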
nilq/baby-python
python
import os
import unittest


def resolve_runfile(path):
    if os.getenv('RUNFILES_MANIFEST_ONLY') != "1":
        return os.path.join(os.environ['TEST_SRCDIR'], path)
    manifest = os.getenv('RUNFILES_MANIFEST_FILE')
    with open(manifest) as f:
        for line in f.readlines():
            if line.split()[0] == path:
                return line.split()[1]
    # Raising a bare string is invalid in Python 3; wrap the message in an
    # exception object.
    raise Exception("Cannot find %s in manifest %s" % (path, manifest))


class CheckVersionTest(unittest.TestCase):
    BZL_PATH = 'build_bazel_rules_nodejs/internal/common/check_version.bzl'

    def setUp(self):
        self.globals = {}
        exec(open(resolve_runfile(self.BZL_PATH)).read(), self.globals)

    def testVersionComparison(self):
        result = self.globals['check_version']('1.2.2', '1.2.3')
        self.assertIs(result, False)

    def testVersionRangeWithin(self):
        result = self.globals['check_version_range']('1.2.2', '1.2.1', '1.2.3')
        self.assertIs(result, True)

    def testVersionOutOfLowRange(self):
        result = self.globals['check_version_range']('1.2.0', '1.2.1', '1.2.3')
        self.assertIs(result, False)

    def testVersionOutOfHighRange(self):
        result = self.globals['check_version_range']('1.2.4', '1.2.1', '1.2.3')
        self.assertIs(result, False)

    def testNotAlphaComparison(self):
        result = self.globals['check_version']('1.12.3', '1.2.1')
        self.assertIs(result, True)

    def testReleaseCandidate(self):
        result = self.globals['check_version']('0.8.0rc2', '0.8.0')
        self.assertIs(result, True)


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
import tensorflow as tf
import sys
sys.path.append('./ext/voxelmorph/')
sys.path.append('./ext/neurite-master/')
sys.path.append('./ext/pynd-lib/')
sys.path.append('./ext/pytools-lib/')
from voxelmorph.tf.losses import Grad, NCC, NonSquareNCC

loss_object = tf.keras.losses.MeanSquaredError()  # used for GAN + def. reg.
loss_object_NCC = NCC(win=[9]*3)  # used for registration
loss_object_NonSquareNCC = NonSquareNCC(win=[9]*3)  # not used in paper


# ----------------------------------------------------------------------------
# Generator losses

@tf.function
def total_variation3D(ypred):
    """ Not used in paper.
        Calculates anisotropic total variation for a 3D image ypred.
    """
    pixel_dif1 = ypred[:, 1:, :, :, :] - ypred[:, :-1, :, :, :]
    pixel_dif2 = ypred[:, :, 1:, :, :] - ypred[:, :, :-1, :, :]
    pixel_dif3 = ypred[:, :, :, 1:, :] - ypred[:, :, :, :-1, :]

    tot_var = (
        tf.reduce_mean(tf.math.abs(pixel_dif1)) +
        tf.reduce_mean(tf.math.abs(pixel_dif2)) +
        tf.reduce_mean(tf.math.abs(pixel_dif3))
    )
    return tf.reduce_mean(tot_var)


@tf.function
def generator_loss(
        disc_opinion_fake_local,
        disp_ms,
        disp,
        moved_atlases,
        fixed_images,
        epoch,
        sharp_atlases,
        loss_wts,
        start_step=0,
        reg_loss_type='NCC',
):
    """Loss function for Generator:
    Args:
        disc_opinion_fake_local: tf float
            Local feedback from discriminator.
        disp_ms: tf float
            Moving average of displacement fields.
        disp: tf float
            Displacement fields.
        moved_atlases: tf float
            Moved template images.
        fixed_images: tf float
            Target images.
        epoch: int
            Training step.
        sharp_atlases: tf float
            Generated Template image.
        loss_wts: list
            List of regularization weights for gan loss, deformation, and TV.
        start_step: int
            Training step to start training adversarial component.
        reg_loss_type: str
            Similarity loss used for registration ('NCC' or 'NonSquareNCC').
    """
    lambda_gan, lambda_reg, lambda_tv = loss_wts

    # If training registration only, without GAN loss.
    # Need to do this, otherwise graph detaches:
    if epoch >= start_step:
        gan_loss = loss_object(
            tf.ones_like(disc_opinion_fake_local),
            disc_opinion_fake_local,
        )
        if lambda_tv > 0.0:  # never happens as TV loss not used in paper
            tv_loss = total_variation3D(sharp_atlases)
        else:
            tv_loss = 0.0
    else:
        gan_loss = 0.0
        tv_loss = 0.0

    # Similarity terms:
    if reg_loss_type == 'NCC':
        similarity_loss = tf.reduce_mean(
            loss_object_NCC.loss(moved_atlases, fixed_images),
        )
    elif reg_loss_type == 'NonSquareNCC':  # Not used in paper.
        similarity_loss = tf.reduce_mean(
            loss_object_NonSquareNCC.loss(moved_atlases, fixed_images),
        )

    # smoothness terms:
    smoothness_loss = tf.reduce_mean(
        Grad('l2').loss(tf.zeros_like(disp), disp),
    )

    # magnitude terms:
    magnitude_loss = loss_object(tf.zeros_like(disp), disp)
    moving_magnitude_loss = loss_object(tf.zeros_like(disp_ms), disp_ms)

    # Choose between registration only or reg+gan training:
    if epoch < start_step:
        total_gen_loss = (
            (lambda_reg * smoothness_loss) +
            (0.01 * lambda_reg * magnitude_loss) +
            (lambda_reg * moving_magnitude_loss) +
            1*similarity_loss
        )
    else:
        total_gen_loss = (
            lambda_gan*gan_loss +
            (lambda_reg * smoothness_loss) +
            (0.01 * lambda_reg * magnitude_loss) +
            (lambda_reg * moving_magnitude_loss) +
            1*similarity_loss +
            lambda_tv*tv_loss
        )

    return (
        total_gen_loss,
        gan_loss,
        smoothness_loss,
        magnitude_loss,
        similarity_loss,
        moving_magnitude_loss,
        tv_loss,
    )


# ----------------------------------------------------------------------------
# Discriminator losses

@tf.function
def discriminator_loss(
        disc_opinion_real_local,
        disc_opinion_fake_local,
):
    """Loss function for Discriminator:
    Args:
        disc_opinion_fake_local: tf float
            Local feedback from discriminator on moved templates.
disc_opinion_real_local: tf float Local feedback from discriminator on real fixed images. """ gan_fake_loss = loss_object( tf.zeros_like(disc_opinion_fake_local), disc_opinion_fake_local, ) gan_real_loss = loss_object( tf.ones_like(disc_opinion_real_local), disc_opinion_real_local, ) total_loss = 0.5*(gan_fake_loss + gan_real_loss) return total_loss
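

if __name__ == '__main__':
    # Quick sanity check (my addition, not part of the original training
    # code): with a perfect discriminator, real patches score 1 and fakes
    # score 0, giving zero LSGAN loss; uniform 0.5 opinions give the
    # maximal-confusion value of 0.25.
    real_opinion = tf.ones([2, 4, 4, 4, 1])
    fake_opinion = tf.zeros([2, 4, 4, 4, 1])
    print(float(discriminator_loss(real_opinion, fake_opinion)))  # 0.0
    half = 0.5 * tf.ones([2, 4, 4, 4, 1])
    print(float(discriminator_loss(half, half)))  # 0.25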
nilq/baby-python
python
import time import datetime import shutil import os import sys sys.path.insert( 0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) import emdee print(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')) # In this case we want to LOAD the results of a previous emdee run to pick up where # we left off. With mode set to 'load' and loc pointing to the subdirectory containing # a previous set of results (LOG.txt, last_lnprob.txt, etc...), an Emdee class is # populated with the loaded data and is ready to continue iterating. Changes to the # number of walkers or the parameters (and bounds) should not be made at this point. # This is primarily for continuing runs that may have crashed, or completed successfully # but haven't reached burn in (if running in small chunks locally, for example). emdeeClass = emdee.Emdee(mode='load',loc='example_output') emdeeClass.PrintParams() # Just to check, for example # As before, we just run another batch of iterations picking up from where the previous # run that we loaded had left off. emdeeClass.GoMCMC(100) print(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
nilq/baby-python
python
############################################################################### # # file: typing.py # # Purpose: refer to module documentation for details # # Note: This file is part of Termsaver application, and should not be used # or executed separately. # ############################################################################### # # Copyright 2012 Termsaver # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # ############################################################################### """ A helper class used for screens that require more dynamic output to users. See additional information in the class itself. The helper class available here is: * `TypingHelperBase` """ # # Python built-in modules # import sys import time # # Internal modules # from termsaverlib.screen.helper import ScreenHelperBase from termsaverlib import constants class TypingHelperBase(ScreenHelperBase): """ This helper class gives functionality to screens to print out information in a more interactive way, simulating a typing writer machine, based on two main speed control properties: * `delay`: defines the delay for printing out characters of a string * `line_delay`: defines the delay for printing out new lines within a string (sometimes, setting different proportions make a lot of a difference) If no values are defined by the screen itself, default values should be used. The `delay` is set in `constants.Settings.CHAR_DELAY_SECONDS`, and the `line_delay` is 10 times the value of delay. To use this screen helper is pretty straightforward, just call the method: * `typing_print`: this will print the specified text string using the speed controls `delay` and `line_delay`. """ delay = None """ Defines the character printing delay, to give a cool visual of a typing machine. This value is measured in seconds, and default marks are defined in `constants.Settings.CHAR_DELAY_SECONDS`. """ line_delay = None """ Defines the delay imposed to every new line prior to char printing. By default, its value is 10x the `delay`. """ def typing_print(self, text): """ Prints text with standard output to allow side-by-side printing, and give the impression of a typing writer machine. The speed is controlled by properties of this class: `delay` and `line_delay`. Arguments: * text: the text to be printed in typing style Notes: * This also supports new lines (\n) * blank spaces, due to its lack of meaning, are ignored for speed limiting, so they will be flushed all at once. """ # set defaults if self.delay is None: self.delay = constants.Settings.CHAR_DELAY_SECONDS if self.line_delay is None: self.line_delay = 10 * self.delay splitText = text.split('\n') for line in splitText: for char in line: sys.stdout.write(char) # only pause if it is not a blank space if char != ' ': time.sleep(self.delay) sys.stdout.flush() # need to re-print the line removed from the split sys.stdout.write('\n') time.sleep(self.line_delay) # specific pause for new lines
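

if __name__ == '__main__':
    # Small demonstration (my addition, not part of termsaver): mix the
    # helper into a bare class and type out a short message. This assumes
    # ScreenHelperBase needs no constructor arguments.
    class _DemoScreen(TypingHelperBase):
        pass

    demo = _DemoScreen()
    demo.delay = 0.005  # speed things up for the demo
    demo.typing_print("Hello from the typing helper!\nGoodbye.")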
nilq/baby-python
python
from .runners import Noun # ?
nilq/baby-python
python
class Value: def __get__(self, instance, instance_type): return self.amount def __set__(self, instance, value): self.amount = value - instance.commission * value class Account: amount = Value() def __init__(self, commission): self.commission = commission """ new_account = Account(0.1) new_account.amount = 100 print(new_account.amount) #90 """ #Teacher's solution: class Value2: def __init__(self): self.amount = 0 def __get__(self, obj, obj_type): return self.amount def __set__(self, obj, value): self.amount = value - value * obj.commission
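
# Caveat (my addition, not part of the exercise): both Value and Value2 keep
# `amount` on the descriptor object, which lives on the class, so every
# Account instance shares a single balance. A per-instance variant stores the
# state on the instance instead:
class Value3:
    def __get__(self, instance, instance_type):
        return getattr(instance, '_amount', 0)

    def __set__(self, instance, value):
        instance._amount = value - instance.commission * value

"""
class SafeAccount:
    amount = Value3()
    def __init__(self, commission):
        self.commission = commission

a, b = SafeAccount(0.1), SafeAccount(0.5)
a.amount = 100
print(a.amount, b.amount)  # 90 0 -- balances stay independent
"""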
nilq/baby-python
python
""" 395. Longest Substring with At Least K Repeating Characters This question is listed as a medium question under sliding window category. But sliding window approach is too complex and maybe a hard problem for that case, simple approach is doing a dfs. But it is expensive. The following solution is not optimal but works. Time complexity -> O(N) for stack iteration, O(N) for set operation, O(N) for count operation. ==> O(N3) where N is the length of string """ class Solution: def longestSubstring(self, s: str, k: int) -> int: stack = [s] max_len = 0 while stack: tmp = stack.pop() for key in set(tmp): if tmp.count(key) < k: parts = tmp.split(key) stack.extend(parts) break else: max_len = max(max_len, len(tmp)) return max_len
nilq/baby-python
python
from sys import argv from pathlib import Path from datetime import date from time import strftime import json # Logging (console) def print_log(message): """Simple logging function: Adds timestamp before message""" print(strftime("%H:%M:%S") + ": " + message) # Basic structures def set_date(date_=None): """Provides the processing date""" if not date_: return date.today().strftime("%y-%m-%d") else: return date_ def get_categories(): """Provides the basic categories of data - confirmed: Confirmed cases - deaths: Deaths - recovered: Recovered cases - active: Active cases (confirmed - deaths - recovered) """ return ["confirmed", "deaths", "recovered", "active"] def get_variants(category): """Provides the different data variants""" variants = [ "cum", "cum_rel_popmio", "cum_rel_pop100k", "diff", "diff_rel_popmio", "diff_rel_pop100k", "diff_ma1w", "diff_rel_popmio_ma1w", "diff_rel_pop100k_ma1w", "diff_rel_active", ] if category == "active": return variants return variants[:-1] # Web-related information def get_feed_url(category): """Provides the data urls of John Hopkins University's GitHub project (confirmed, deaths, recovered) """ with get_settings_file_path("urls").open("r") as file: return json.load(file)[category] # Paths and files def get_dir_path(key, date_=None): """Sets up the directory structure used in the rest of the application: - script_path/settings: For settings (json-files with parameters) - output_path/data/dte/feed: For the raw downloaded data - output_path/data/dte: For the prepared data - output_path/plots/dte: For the generated plots """ # Determine settings directory: Subdirectory of the directory in which the # script is located, named "settings" if key == "settings": return Path(argv[0]).parent / key # Determine the output directory: Either stored in the "output_dir.json"- # file located in the settings directory or the directory in which the # script is located path = Path(argv[0]).parent if get_settings_file_path("output_dir").exists(): with get_settings_file_path("output_dir").open("r") as file: settings = json.load(file) if settings["OUTPUT_DIR"] != "": path = Path(settings["OUTPUT_DIR"]) # Output directories if key in ["base_data", "base_plots"]: path = path / key[5:] elif key in ["data", "plots"]: path = get_dir_path("base" + "_" + key) / date_ elif key == "feed": path = get_dir_path("data", date_) / key path.mkdir(parents=True, exist_ok=True) return path def get_settings_file_path(key): """Provides path to the settings files (json-files stored in the folder ../settings, containing some basic parameters and definitions) """ return get_dir_path("settings").joinpath(key + ".json") def get_feed_file_path(date_, category): """Provides paths to the CSV-files used for saving the downloaded data: dir_base/dte/data/feed_(confirmed/deaths/recovered).csv """ return get_dir_path("feed", date_).joinpath(category + ".csv") def get_data_file_path(date_, name="data", file_format="json"): """Provides the path to the prepared csv/json-files from day dte containing the data for category cat and variant var """ return get_dir_path("data", date_) / f"{name}.{file_format}" def get_plot_file_path(date_, base, *args): """Provides the path to the plot-file generated from day dte-data, defined by the categories and variants specified in *args """ filename = base for arg in args: filename += "_" + arg filename += ".png" path = get_dir_path("plots", date_).joinpath(base) path.mkdir(parents=True, exist_ok=True) return path.joinpath(filename) def get_region(region, subregion="-"): """Provides 
lists of countries organized in regions (e.g. Europe, middle, south, east, north, ...). Definitions are stored in the settings file regions.json in the folder ../settings. """ with get_settings_file_path("regions").open("r") as file: return json.load(file)[region][subregion]
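

if __name__ == '__main__':
    # Small illustration (my addition): the pure helpers can be exercised
    # without touching the file system or the settings files.
    print(set_date())              # today's date, e.g. '21-03-15'
    print(set_date('20-12-31'))    # an explicit processing date passes through
    print(get_categories())        # ['confirmed', 'deaths', 'recovered', 'active']
    print(get_variants('deaths'))  # everything except 'diff_rel_active'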
nilq/baby-python
python
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2012 onwards University of Deusto # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # # This software consists of contributions made by many individuals, # listed below: # # Author: Pablo Orduña <pablo@ordunya.com> # from __future__ import print_function, unicode_literals import os import time from collections import OrderedDict from weblab.util import data_filename import flask_admin def weblab_httpd_config_generate(directory): print("Generating HTTPd configuration files... ", end='') result = httpd_config_generate(directory) print("[done]") return result def httpd_config_generate(directory): debugging_variables = {} execfile(os.path.join(directory, 'debugging.py'), debugging_variables) ports = debugging_variables.get('PORTS', {}).get('json') base_url = debugging_variables.get('BASE_URL', '') if base_url in ('','/'): base_url = '' static_directories = OrderedDict() #{ # url path : disk path # } static_directories[base_url + '/weblab/client'] = data_filename('weblab/core/static/oldclient').replace('\\','/') # \ => / for Windows flask_admin_static = os.path.join(os.path.dirname(flask_admin.__file__), 'static') static_directories[base_url + '/weblab/admin/static'] = flask_admin_static.replace('\\','/') # TODO: Avoid repeated paths static_directories[base_url + '/weblab/instructor/static'] = data_filename('weblab/admin/web/static').replace('\\','/') static_directories[base_url + '/weblab/profile/static'] = data_filename('weblab/admin/web/static').replace('\\','/') static_directories[base_url + '/weblab/web/static'] = data_filename('weblab/core/static').replace('\\','/') static_directories[base_url + '/weblab/static'] = data_filename('weblab/core/static').replace('\\','/') static_directories[base_url + '/weblab/gwt/weblabclientlab'] = data_filename('war/weblabclientlab').replace('\\','/') static_directories[base_url + '/weblab/web/pub'] = os.path.abspath(os.path.join(directory, 'pub')).replace('\\','/') files = {} apache_contents = _apache_generation(directory, base_url, ports, static_directories) files['apache'] = _set_contents(directory, 'httpd/apache_weblab_generic.conf', apache_contents) simple_httpd_contents = _simple_httpd_generation(directory, base_url, ports, static_directories) files['simple'] = _set_contents(directory, 'httpd/simple_server_config.py', simple_httpd_contents) # TODO: support nginx return files def _set_contents(directory, filename, new_contents): original_path = os.path.join(directory, filename) destination_path = os.path.join(directory, filename + "-backup-" + time.strftime("%Y-%m-%d_%H-%M-%S")) if os.path.exists(original_path): original_contents = open(original_path).read() open(destination_path, 'w').write(original_contents) open(original_path, 'w').write(new_contents) return os.path.abspath(original_path) def _apache_generation(directory, base_url, ports, static_directories): apache_conf = ( "\n" """<LocationMatch (.*)nocache\.js$>\n""" """ Header Set Cache-Control "max-age=0, no-store"\n""" """</LocationMatch>\n""" """\n""" """<Files *.cache.*>\n""" """ Header Set Cache-Control "max-age=2592000"\n""" """</Files>\n""" """\n""" """# Apache redirects the regular paths to the particular directories \n""" # """RedirectMatch ^%(root)s$ %(root)s/weblab/\n""" # """RedirectMatch ^%(root)s/$ %(root)s/weblab/\n""" """RedirectMatch ^%(root)s/weblab$ %(root)s/weblab/\n""" """RedirectMatch ^%(root)s/weblab/client/$ 
%(root)s/weblab/client/index.html\n""" """\n""") for static_url, static_directory in static_directories.items(): apache_conf += """Alias %(static_url)s %(static_directory)s\n""" % dict(static_url=static_url, static_directory=static_directory) apache_conf += ( """\n""" """<Location %(root)s/weblab/>\n""" """ <IfModule authz_core_module>\n""" """ Require all granted\n""" """ </IfModule>\n""" """\n""" """ <IfModule !authz_core_module>\n""" """ Order allow,deny\n""" """ Allow from All\n""" """ </IfModule>\n""" """</Location>\n""" """\n""" """<Directory "%(directory)s">\n""" """ Options Indexes FollowSymLinks\n""" """\n""" """ <IfModule authz_core_module>\n""" """ Require all granted\n""" """ </IfModule>\n""" """\n""" """ <IfModule !authz_core_module>\n""" """ Order allow,deny\n""" """ Allow from All\n""" """ </IfModule>\n""" """</Directory>\n""" """\n""") previous = [] for static_directory in static_directories.values(): if static_directory in previous: continue previous.append(static_directory) apache_conf += ("""<Directory "%(static_directory)s">\n""" """ Options Indexes FollowSymLinks\n""" """\n""" """ <IfModule authz_core_module>\n""" """ Require all granted\n""" """ </IfModule>\n""" """\n""" """ <IfModule !authz_core_module>\n""" """ Order allow,deny\n""" """ Allow from All\n""" """ </IfModule>\n""" """</Directory>\n""" """\n""") % dict(static_directory=static_directory) apache_conf += ( """# Apache redirects the requests retrieved to the particular server, using a stickysession if the sessions are based on memory\n""" """ProxyPreserveHost On\n""" """ProxyVia On\n""" """\n""") for static_url, static_directory in static_directories.items(): apache_conf += """ProxyPass %(static_url)s !\n""" % dict(static_url=static_url) apache_conf += ( """\n""" """ProxyPass %(root)s/weblab/ balancer://%(root-no-slash)s_weblab_cluster/ stickysession=weblabsessionid lbmethod=bybusyness\n""" """ProxyPassReverse %(root)s/weblab/ balancer://%(root-no-slash)s_weblab_cluster/ stickysession=weblabsessionid\n""" "\n") apache_conf += "\n" apache_conf += """<Proxy balancer://%(root-no-slash)s_weblab_cluster>\n""" for pos, port in enumerate(ports): d = { 'port' : port, 'route' : 'route%s' % (pos+1), 'root' : '%(root)s' } apache_conf += """ BalancerMember http://localhost:%(port)s/weblab route=%(route)s\n""" % d apache_conf += """</Proxy>\n""" apache_img_dir = '/client/images' apache_root_without_slash = base_url[1:] if base_url.startswith('/') else base_url server_conf_dict = { 'root' : base_url, 'root-no-slash' : apache_root_without_slash.replace('/','_'), 'directory' : os.path.abspath(directory).replace('\\','/'), 'war_path' : data_filename('war').replace('\\','/') } apache_conf = apache_conf % server_conf_dict apache_conf_path = os.path.join('', 'apache_weblab_generic.conf') return apache_conf def _simple_httpd_generation(directory, base_url, ports, static_directories): proxy_paths = [ ('%(root)s$', 'redirect:%(root)s/weblab/'), ('%(root)s/$', 'redirect:%(root)s/weblab/'), ('%(root)s/weblab/client$', 'redirect:%(root)s/weblab/client/index.html'), ] for key, directory in static_directories.items(): proxy_paths.append((key, 'file:{0}'.format(directory))) proxy_path = "proxy-sessions:weblabsessionid:" for pos, port in enumerate(ports): d = { 'port' : port, 'route' : 'route%s' % (pos+1), 'root' : '%(root)s' } proxy_path += '%(route)s=http://localhost:%(port)s/weblab/,' % d proxy_paths.append(('%(root)s/weblab/', proxy_path)) proxy_paths.append(('%(root)s/weblab', 'redirect:%(root)s/weblab/')) proxy_paths.append(('', 
'redirect:%(root)s/weblab/')) if base_url in ('','/'): root = '' else: root = base_url apache_img_dir = '/client/images' server_conf_dict = { 'root' : root, 'directory' : os.path.abspath(directory).replace('\\','/') } proxy_paths = eval(repr(proxy_paths) % server_conf_dict) proxy_paths_str = "PATHS = [ \n" for proxy_path in proxy_paths: proxy_paths_str += " %s,\n" % repr(proxy_path) proxy_paths_str += "]\n" return proxy_paths_str if __name__ == '__main__': httpd_config_generate("/tmp/foo")
nilq/baby-python
python
""" Copyright (c) 2020 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ import argparse import shlex import subprocess import sys import config import global_vars from deamon import Daemon from detector.tools.slow_sql import diagnosing from global_vars import * from utils import check_time_legality, check_port_occupancy, check_collector, check_db_alive sys.path.append(CURRENT_DIRNAME) __version__ = '1.0.0' __description__ = 'Anomaly-detection: a time series forecast and anomaly detection tool.' __epilog__ = """ epilog: the 'a-detection.conf' and 'metric_task.conf' will be read when the program is running, the location of them is: a-detection.conf: {detection}. metric_config: {metric_config}. """.format(detection=CONFIG_PATH, metric_config=METRIC_CONFIG_PATH) def usage(): return """ python main.py start [--role {{agent,collector,monitor}}] # start local service. python main.py stop [--role {{agent,collector,monitor}}] # stop local service. python main.py start [--user USER] [--host HOST] [--project-path PROJECT_PATH] [--role {{agent,collector,monitor}}] # start the remote service. python main.py stop [--user USER] [--host HOST] [--project-path PROJECT_PATH] [--role {{agent,collector, monitor}}] # stop the remote service. python main.py deploy [--user USER] [--host HOST] [--project-path PROJECT_PATH] # deploy project in remote host. python main.py diagnosis [--query] [--start_time] [--finish_time] # rca for slow SQL. python main.py show_metrics # display all monitored metrics(can only be executed on 'detector' machine). python main.py forecast [--metric-name METRIC_NAME] [--period] [--freq] [--forecast-method {{auto_arima, fbprophet}}] [--save-path SAVE_PATH] # forecast future trend of metric(can only be executed on 'detector' machine). """ def parse_args(): parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__description__, usage=usage(), epilog=__epilog__) parser.add_argument('mode', choices=['start', 'stop', 'deploy', 'show_metrics', 'forecast', 'diagnosis']) parser.add_argument('--user', help="User of remote server.") parser.add_argument('--host', help="IP of remote server.") parser.add_argument('--project-path', help="Project location in remote server.") parser.add_argument('--role', choices=['agent', 'collector', 'monitor'], help="Run as 'agent', 'collector', 'monitor'. " "Notes: ensure the normal operation of the openGauss in agent.") parser.add_argument('--metric-name', help="Metric name to be predicted, if this parameter is not provided, " "all metric in database will be predicted.") parser.add_argument('--query', help="target sql for RCA") parser.add_argument('--start_time', help="start time of query") parser.add_argument('--finish_time', help="finish time of query") parser.add_argument('--period', default=1, help="Forecast periods of metric, it should be integer" "notes: the specific value should be determined to the trainnig data." 
"if this parameter is not provided, the default value '100S' will be used.") parser.add_argument('--freq', default='S', help="forecast gap, time unit: " "S: Second, " "M: Minute, " "H: Hour, " "D: Day, " "W: Week. ") parser.add_argument('--forecast-method', default='auto_arima', help="Forecast method, default method is 'auto_arima'," "if want to use 'fbprophet', you should install fbprophet first.") parser.add_argument('--save-path', help='Save the results to this path using csv format, if this parameter is not provided,' ', the result wil not be saved.') parser.add_argument('-v', '--version', action='version') parser.version = __version__ return parser.parse_args() def forecast(args): from prettytable import PrettyTable from detector.algorithm import get_fcst_alg from detector.service.storage.sqlite_storage import SQLiteStorage from utils import StdStreamSuppressor display_table = PrettyTable() display_table.field_names = ['Metric name', 'Date range', 'Minimum', 'Maximum', 'Average'] database_dir = config.get('database', 'database_dir') if not args.forecast_method: forecast_alg = get_fcst_alg('auto_arima')() else: forecast_alg = get_fcst_alg(args.forecast_method)() def forecast_metric(name, train_ts, save_path=None): with StdStreamSuppressor(): forecast_alg.fit(timeseries=train_ts) dates, values = forecast_alg.forecast( period=int(args.period) + 1, freq=args.freq) date_range = "{start_date}~{end_date}".format(start_date=dates[0], end_date=dates[-1]) display_table.add_row( [name, date_range, min(values), max(values), sum(values) / len(values)] ) if save_path: if not os.path.exists(os.path.dirname(save_path)): os.makedirs(os.path.dirname(save_path)) with open(save_path, mode='w') as f: for date, value in zip(dates, values): f.write(date + ',' + str(value) + '\n') for database in os.listdir(database_dir): with SQLiteStorage(os.path.join(database_dir, database)) as db: table_rows = db.get_table_rows('os_exporter') timeseries = db.get_timeseries(table='os_exporter', field=args.metric_name, period=table_rows) forecast_metric(args.metric_name, timeseries, args.save_path) print(display_table.get_string()) def slow_sql_rca(args): from prettytable import PrettyTable from detector.service.storage.sqlite_storage import SQLiteStorage from utils import input_sql_processing, remove_comment if not args.query: print('Error: no query input!') return user_query = args.query.split(';')[0] start_time = args.start_time finish_time = args.finish_time if start_time and not check_time_legality(start_time): print("error time format '{time}', using: {date_format}.".format(time=start_time, date_format=global_vars.DATE_FORMAT)) return if finish_time and not check_time_legality(finish_time): print("error time format '{time}', using: {date_format}.".format(time=finish_time, date_format=global_vars.DATE_FORMAT)) return database_dir = os.path.realpath(config.get('database', 'database_dir')) display_table = PrettyTable() display_table.field_names = ['database', 'start time', 'finish time', 'rca', 'suggestion'] display_table.align = 'l' for database in os.listdir(database_dir): if 'journal' in database: continue try: database_path = os.path.join(database_dir, database) with SQLiteStorage(database_path) as db: if start_time and finish_time: results = db.fetch_all_result( "select query, start_time, finish_time from wdr where start_time " "between '{start_time}' and '{finish_time}';".format( start_time=start_time, finish_time=finish_time)) elif start_time: results = db.fetch_all_result( "select query, start_time, finish_time 
from wdr where start_time >= '{margin_time}';".format( margin_time=start_time)) elif finish_time: results = db.fetch_all_result( "select query, start_time, finish_time from wdr where finish_time <= '{margin_time}';".format( margin_time=finish_time)) else: current_time = int(time.time()) # If not input start_time and finish_time, then default search for 12 hours of historical data. margin_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(current_time - 43200)) results = db.fetch_all_result( "select query, start_time, finish_time from wdr where start_time >= '{margin_time}';".format( margin_time=margin_time)) if not results: continue for wdr_query, start_time, finish_time in results: try: processed_wdr_query = input_sql_processing(wdr_query).replace(' ', '') processed_user_query = input_sql_processing(user_query).replace(' ', '') if processed_user_query == processed_wdr_query: user_query = remove_comment(user_query) diagnose_result = diagnosing.diagnose_user(db, user_query, start_time) start_time, finish_time = diagnose_result[0], diagnose_result[1] rca_ana = "" suggestion_ana = "" if not diagnose_result[2:]: rca_ana = "the query has no slow features or its syntax is incorrect." suggestion_ana = "please check the query threshold, check the log, and analyze the reason." else: index = 1 for rca, suggestion in diagnose_result[2:]: rca_ana = rca_ana + "{index}: {rca}\n".format(index=index, rca=rca) suggestion_ana = suggestion_ana + "{index}: {suggestion}\n".format(index=index, suggestion=suggestion) index += 1 display_table.add_row([database, start_time, finish_time, rca_ana, suggestion_ana]) except Exception as e: # Prevent unknown accidents from causing the program to stop continue except Exception as e: print(str(e)) return print(display_table.get_string()) def deploy(args): print('Please input the password of {user}@{host}: '.format(user=args.user, host=args.host)) command = 'sh start.sh --deploy {host} {user} {project_path}' \ .format(user=args.user, host=args.host, project_path=args.project_path) if subprocess.call(shlex.split(command), cwd=BIN_PATH) == 0: print("\nExecute successfully.") else: print("\nExecute unsuccessfully.") def show_metrics(): from prettytable import PrettyTable from detector.service.storage.sqlite_storage import SQLiteStorage display_table = PrettyTable() display_table.field_names = ['Metric name', 'Current rows'] database_dir = config.get('database', 'database_dir') for database in os.listdir(database_dir): with SQLiteStorage(os.path.join(database_dir, database)) as db: table = 'os_exporter' fields = db.get_all_fields(table) rows = db.get_table_rows(table) for field in fields: display_table.add_row([field, rows]) print(display_table.get_string()) def manage_local_service(args): daemon = Daemon() daemon.set_stdout(os.devnull).set_stderr(os.devnull) if args.role == 'collector': from detector.service import service_main daemon.set_pid_file(os.path.join(CURRENT_DIRNAME, './tmp/collector.pid')) daemon.set_function(service_main) elif args.role == 'monitor': from detector.metric_detector import detector_main daemon.set_pid_file(os.path.join(CURRENT_DIRNAME, './tmp/detector.pid')) daemon.set_function(detector_main) elif args.role == 'agent': from agent.metric_agent import agent_main pre_check = check_collector() and check_db_alive(port=config.get('agent', 'db_port')) if args.mode == 'start' and not pre_check: print('FATAL: Agent process failed to start.', file=sys.stderr, flush=True) return daemon.set_pid_file(os.path.join(CURRENT_DIRNAME, './tmp/agent.pid')) 
daemon.set_function(agent_main) else: print('FATAL: incorrect parameter.') print(usage()) return if args.mode == 'start': if args.role == 'collector': listen_port = config.get('server', 'listen_port') check_port_occupancy(listen_port) daemon.start() else: daemon.stop() def manage_remote_service(args): print('Please input the password of {user}@{host}: '.format(user=args.user, host=args.host)) if args.mode == 'start': command = "sh start.sh --start_remote_service {host} {user} {project_path} {role}" \ .format(user=args.user, host=args.host, role=args.role, project_path=args.project_path) else: command = "sh stop.sh --stop_remote_service {host} {user} {project_path} {role}" \ .format(user=args.user, host=args.host, role=args.role, project_path=args.project_path) if subprocess.call(shlex.split(command), cwd=BIN_PATH) == 0: print("\nExecute successfully.") else: print("\nExecute unsuccessfully.") def main(): args = parse_args() if args.mode in ('start', 'stop') and all((args.user, args.host, args.project_path, args.role)): manage_remote_service(args) elif args.mode in ('start', 'stop') and args.role and not any((args.user, args.host, args.project_path)): manage_local_service(args) elif args.mode == 'deploy' and all((args.user, args.host, args.project_path)): deploy(args) elif args.mode == 'show_metrics': show_metrics() elif args.mode == 'forecast': forecast(args) elif args.mode == 'diagnosis': slow_sql_rca(args) else: print("FATAL: incorrect parameter.") print(usage()) return -1 if __name__ == '__main__': main()
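# The CLI above depends on check_time_legality() and global_vars.DATE_FORMAT,
# which are defined elsewhere in this project. A minimal sketch of such a
# validator (the function name and the format string below are assumptions
# for illustration, not the project's actual definitions):
#
#     def check_time_legality(time_string, date_format='%Y-%m-%d %H:%M:%S'):
#         """Return True if time_string parses with date_format, else False."""
#         try:
#             time.strptime(time_string, date_format)
#             return True
#         except ValueError:
#             return False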
nilq/baby-python
python
#!/usr/bin/env python

from setuptools import setup, find_packages

setup(
    name="array_neutron_lbaas",
    description="Array vADC OpenStack Neutron LBaaS Device Driver",
    long_description=open("README.md").read(),
    version="1.0.0",
    url="https://www.arraynetworks.com.cn",
    packages=find_packages(),
    scripts=[
        "scripts/array_lbaas_config_generator",
        "scripts/array_lbaas_init_db",
        "scripts/array_lbaas_init_network",
        "scripts/array_lbaas_tenant_customization"
    ],
    data_files=[
        ("/etc/neutron/conf.d/neutron-server", ["conf/array_vapv_lbaas.conf"]),
        ("/etc/dhcp/octavia/", ["conf/dhclient.conf"])
    ],
    license="Apache Software License",
    platforms=["Linux"],
    classifiers=[
        "Intended Audience :: Information Technology",
        "Intended Audience :: System Administrators",
        "Environment :: OpenStack",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7"
    ]
)
nilq/baby-python
python
# from typing import Optional # from discord import Embed # from discord.utils import get # from discord.ext.menus import MenuPages, ListPageSource # from discord.ext.commands import Cog, command # def syntax(command): # cmd_and_aliases = "|".join([str(command), *command.aliases]) # params = [] # for key, value in command.params.items(): # if key not in ("self", "ctx"): # params.append(f"[{key}]" if "NoneType" in str(value) else f"<{key}>") # params = " ".join(params) # return f"```{cmd_and_aliases} {params}```" # class HelpMenu(ListPageSource): # def __init__(self, ctx, data): # self.ctx = ctx # super().__init__(data, per_page=5) # async def write_page(self, menu, fields=[]): # offset = (menu.current_page * self.per_page) + 1 # len_data = len(self.entries) # embed = Embed( # title="Help", # description="Welcome to the boNo help dialog!", # colour=self.ctx.author.colour, # ) # embed.set_thumbnail(url=self.ctx.guild.me.avatar_url) # embed.set_footer( # text=f"{offset:,} - {min(len_data, offset+self.per_page-1):,} of {len_data:,} commands." # ) # for name, value in fields: # embed.add_field(name=name, value=value, inline=False) # return embed # async def format_page(self, menu, entries): # fields = [] # for entry in entries: # fields.append((entry.brief or "No Description", syntax(entry))) # return await self.write_page(menu, fields) # class Help(Cog): # def __init__(self, bot): # self.bot = bot # self.bot.remove_command("help") # async def cmd_help(self, ctx, command): # embed = Embed( # title=f"Help with `{command}`", # description=syntax(command), # colour=ctx.author.colour, # ) # embed.add_field(name="Command Description", value=command.help) # await ctx.send(embed=embed) # @command(name="help") # async def show_help(self, ctx, cmd: Optional[str]): # """ # Helps to know commands better. # """ # if cmd is None: # menu = MenuPages( # source=HelpMenu(ctx, list(self.bot.commands)), # clear_reactions_after=True, # delete_message_after=True, # ) # await menu.start(ctx) # else: # if (command := get(self.bot.commands, name=cmd)) : # await self.cmd_help(ctx, command) # else: # await ctx.send( # "I don't follow that command. Perhaps that's an alias or not a command at all." 
# ) # @Cog.listener() # async def on_ready(self): # if not self.bot.ready: # self.bot.cogs_ready.ready_up("help") # def setup(bot): # bot.add_cog(Help(bot)) from discord.ext import commands from discord.ext.commands import Cog from utils.util import Pag class Help(Cog): def __init__(self, bot): self.bot = bot self.bot.remove_command("help") self.cmds_per_page = 6 def get_command_signature(self, command: commands.Command, ctx: commands.Context): aliases = "|".join(command.aliases) cmd_invoke = f"[{command.name}|{aliases}]" if command.aliases else command.name full_invoke = command.qualified_name.replace(command.name, "") signature = f"{ctx.prefix}{full_invoke}{cmd_invoke}" return signature async def return_filtered_commands(self, walkable, ctx): filtered = [] for c in walkable.walk_commands(): try: if c.hidden: continue elif c.parent: continue await c.can_run(ctx) filtered.append(c) except commands.CommandError: continue return self.return_sorted_commands(filtered) def return_sorted_commands(self, commandList): return sorted(commandList, key=lambda x: x.name) async def setup_help_pag(self, ctx, entity=None, title=None): entity = entity or self.bot title = title or self.bot.description pages = [] if isinstance(entity, commands.Command): filtered_commands = ( list(set(entity.all_commands.values())) if hasattr(entity, "all_commands") else [] ) filtered_commands.insert(0, entity) else: filtered_commands = await self.return_filtered_commands(entity, ctx) for i in range(0, len(filtered_commands), self.cmds_per_page): next_commands = filtered_commands[i : i + self.cmds_per_page] commands_entry = "" for cmd in next_commands: desc = cmd.short_doc or cmd.description signature = self.get_command_signature(cmd, ctx) subcommand = "Has subcommands" if hasattr(cmd, "all_commands") else "" commands_entry += ( f"• **__{cmd.name}__**\n```\n{signature}\n```\n{desc}\n" if isinstance(entity, commands.Command) else f"• **__{cmd.name}__**\n{desc}\n {subcommand}\n" ) pages.append(commands_entry) await Pag(title=title, color=0xCE2029, entries=pages, length=1).start(ctx) @commands.command( name="help", aliases=["h", "commands"], description="The help command. Duh!" ) async def help_command(self, ctx, *, entity=None): if not entity: await self.setup_help_pag(ctx) else: cog = self.bot.get_cog(entity) if cog: await self.setup_help_pag(ctx, cog, f"{cog.qualified_name}'s commands") else: command = self.bot.get_command(entity) if command: await self.setup_help_pag(ctx, command, command.name) else: await ctx.send("Entity not found.") @commands.Cog.listener() async def on_ready(self): if not self.bot.ready: self.bot.cogs_ready.ready_up("help") print("cog ready") def setup(bot): bot.add_cog(Help(bot))
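# A minimal wiring sketch, assuming a Bot subclass that exposes the `ready`
# flag and `cogs_ready` tracker referenced in on_ready() above:
#
#     bot = Bot(command_prefix="!")
#     bot.load_extension("cogs.help")   # calls setup(bot) above
#     bot.run(TOKEN)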
nilq/baby-python
python
## heap # Time: O(NlogK) class Solution: def findKthLargest(self, nums: List[int], k: int) -> int: return heapq.nlargest(k, nums)[-1] ## sort # Time: O(NlogN) class Solution: def findKthLargest(self, nums: List[int], k: int) -> int: nums.sort() return nums[-k]
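## quickselect (sketch)
# Average time: O(N), worst case O(N^2); an illustrative in-place
# partitioning variant added for comparison with the heap and sort
# solutions above, not part of the original answers.
import random
from typing import List

class Solution:
    def findKthLargest(self, nums: List[int], k: int) -> int:
        target = len(nums) - k  # index the k-th largest occupies when sorted
        lo, hi = 0, len(nums) - 1
        while True:
            # Move a random pivot to the end, then Lomuto-partition around it.
            r = random.randint(lo, hi)
            nums[r], nums[hi] = nums[hi], nums[r]
            pivot, i = nums[hi], lo
            for j in range(lo, hi):
                if nums[j] < pivot:
                    nums[i], nums[j] = nums[j], nums[i]
                    i += 1
            nums[i], nums[hi] = nums[hi], nums[i]
            if i == target:
                return nums[i]
            elif i < target:
                lo = i + 1
            else:
                hi = i - 1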
nilq/baby-python
python
import math

from .metaf_base import DataDescriptor


class Direction(DataDescriptor):
    def __init__(self, name: str):
        super().__init__(name)

    def _handler(self, value):
        return float(value)


class Speed(DataDescriptor):
    def __init__(self, name: str):
        super().__init__(name)

    def _handler(self, value):
        return float(value)


class Wind:
    __direction = Direction("direction")
    __speed = Speed("speed")
    __gust = Speed("gust")

    def __init__(self, group: str):
        self.__group = group
        self.__direction = group[:3]
        self.__speed = group[3:5]
        self.__gust = group[6:8]

    @property
    def direction_in_degrees(self):
        return self.__direction

    @property
    def direction_in_radians(self):
        # Use math.pi rather than the 3.14 approximation for an exact conversion.
        return self.__direction * math.pi / 180

    @property
    def speed_in_mps(self):
        return self.__speed

    @property
    def speed_in_kph(self):
        return self.__speed * 3.6

    @property
    def gust(self):
        return self.__gust
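# A minimal usage sketch, assuming DataDescriptor converts values through
# _handler on assignment and a METAR-style wind group such as '25012G30KT':
#
#     wind = Wind('25012G30KT')
#     wind.direction_in_degrees   # 250.0
#     wind.speed_in_mps           # 12.0
#     wind.gust                   # 30.0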
nilq/baby-python
python
import csv import numpy as np from scipy import signal import copy def getCsv(txtFileName='seventeenth.txt'): with open(txtFileName) as csv_file: csv_reader = csv.reader(csv_file, delimiter=' ') return list(csv_reader) def parseCharacter(character): value = 1 if character == '#' else 0 return value def parseInput(csvFile): return [[parseCharacter(character) for character in (list(row[0]))] for row in csvFile] def prepareInitialArray(input, plannedIterationSteps): inputArray = np.array(input) inputArrayShape = list(np.shape(inputArray)) initialArrayShapeXAxis = inputArrayShape[0] + 2 * plannedIterationSteps initialArrayShapeYAxis = inputArrayShape[1] + 2 * plannedIterationSteps initialArrayShapeZAxis = 1 + 2 * plannedIterationSteps initialArrayShape = [initialArrayShapeXAxis, initialArrayShapeYAxis, initialArrayShapeZAxis] initialArray = np.zeros(initialArrayShape) initialArray[plannedIterationSteps:plannedIterationSteps + inputArrayShape[0], plannedIterationSteps:plannedIterationSteps + inputArrayShape[1], plannedIterationSteps] = inputArray return initialArray def determineConfiguration(initialState, iterationSteps): recentState = copy.deepcopy(initialState) summationFilter = np.ones((3, 3, 3)) summationFilter[1, 1, 1] = 0 for counter in range(iterationSteps): summationArray = signal.convolve(recentState, summationFilter, 'same', 'direct') sumIsThree = summationArray == 3 sumIsNotTwoOrThree = np.logical_not(np.logical_or(summationArray == 2, summationArray == 3)) recentState[np.logical_and(recentState == 0, sumIsThree)] = 1 recentState[np.logical_and(recentState == 1, sumIsNotTwoOrThree)] = 0 return recentState csvFile = getCsv() providedInput = parseInput(csvFile) iterationSteps = 6 initialArray = prepareInitialArray(providedInput, iterationSteps) finalState = determineConfiguration(initialArray, iterationSteps) print(np.sum(finalState))
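# Sanity check for the neighbour-count convolution used above: a lone active
# cell has no active neighbours, while each adjacent position sees exactly one.
#
#     demo = np.zeros((3, 3, 3))
#     demo[1, 1, 1] = 1
#     flt = np.ones((3, 3, 3)); flt[1, 1, 1] = 0
#     counts = signal.convolve(demo, flt, 'same', 'direct')
#     assert counts[1, 1, 1] == 0 and counts[0, 0, 0] == 1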
nilq/baby-python
python
from gi.repository import Gtk
import asyncio
import threading


class ThreadLoop(threading.Thread):
    def __init__(self, loop):
        threading.Thread.__init__(self)
        self.loop = loop

    def run(self):
        print("starting Thread")
        self.loop.run_forever()
        print("Ending Thread")


class ClientProtocol(asyncio.Protocol):
    def __init__(self, text_buf, loop):
        self.text_buf = text_buf
        self.loop = loop
        self.transport = None

    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        iter_end = self.text_buf.get_end_iter()
        self.text_buf.insert(iter_end, "\n{}".format(data.decode()))

    def connection_lost(self, exc):
        iter_end = self.text_buf.get_end_iter()
        self.text_buf.insert(iter_end, "\n disconnected")
        self.transport.close()
        print("transport has closed")
        #print(dir(self.loop))
        print("self.loop.stop()")
        print(self.loop.stop())

    def send_msg(self, message):
        self.transport.write(message.encode())


class Handler:
    def __init__(self, window, text_entry, text_box):
        self.window = window
        self.text_entry = text_entry
        self.text_box = text_box
        self.text_buf = self.text_box.get_buffer()
        self.window.connect('delete-event', self.quit)
        self.loop = None

    def _send_msg(self, msg):
        self.transport.write(msg.encode())

    @property
    def _can_send_msg(self):
        result = False
        if self.loop:
            if self.loop.is_running():
                result = True
            else:
                self.loop = None
        return result

    def connect_to_server(self, address=('127.0.0.1', 3333)):
        self.loop = asyncio.get_event_loop()
        coro = self.loop.create_connection(lambda: ClientProtocol(
            self.text_buf, self.loop), '127.0.0.1', 3333)
        self.transport, self.protocol = self.loop.run_until_complete(coro)
        self.thread = ThreadLoop(self.loop)
        self.thread.start()

    def connect_button_clicked(self, widget):
        print("connect button clicked")
        if not self._can_send_msg:
            self.connect_to_server()

    def send_button_clicked(self, widget):
        print("sending")
        text = self.text_entry.get_text()
        # end_iter = self.text_buf.get_end_iter()
        if self._can_send_msg:
            self._send_msg(text)

    def quit(self, *args):
        print("quit!!!!")
        print(args)
        if self._can_send_msg:
            self._send_msg("/disconnect")
        Gtk.main_quit()


builder = Gtk.Builder()
builder.add_from_file("chat_test.glade")

window = builder.get_object("window1")
text_entry = builder.get_object("text_entry")
text_box = builder.get_object("textbox")

builder.connect_signals(Handler(window, text_entry, text_box))

window.show_all()
Gtk.main()
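# A minimal echo server sketch for exercising this client locally; it is not
# part of the application and assumes nothing beyond the 127.0.0.1:3333
# address hard-coded above. Run it in a separate process:
#
#     import asyncio
#
#     async def handle(reader, writer):
#         while True:
#             data = await reader.read(1024)
#             if not data:
#                 break
#             writer.write(data)
#             await writer.drain()
#         writer.close()
#
#     async def main():
#         server = await asyncio.start_server(handle, '127.0.0.1', 3333)
#         async with server:
#             await server.serve_forever()
#
#     asyncio.run(main())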
nilq/baby-python
python
#coding: utf-8

if __name__ == '__main__':
    st = "data/mult/result"
    print(st)
    with open("data/resultX.csv", "w") as t:
        t.write("Id,Tags\n")
        a = 0
        for i in range(21):
            print(i)
            with open(st + str(i) + ".csv") as f:
                h = f.readlines()
            temp = 0
            print(len(h))
            for line in h:
                t.write(line)
                a += 1
                temp += 1
            print("temp", temp)
        print("total", a)
nilq/baby-python
python
"""Sweep tests""" import pytest import wandb def test_create_sweep(live_mock_server, test_settings): live_mock_server.set_ctx({"resume": True}) sweep_config = { "name": "My Sweep", "method": "grid", "parameters": {"parameter1": {"values": [1, 2, 3]}}, } sweep_id = wandb.sweep(sweep_config) assert sweep_id == "test" def test_minmax_validation(): api = wandb.apis.InternalApi() sweep_config = { "name": "My Sweep", "method": "random", "parameters": {"parameter1": {"min": 0, "max": 1}}, } filled = api.api._validate_config_and_fill_distribution(sweep_config) assert "distribution" in filled["parameters"]["parameter1"] assert "int_uniform" == filled["parameters"]["parameter1"]["distribution"] sweep_config = { "name": "My Sweep", "method": "random", "parameters": {"parameter1": {"min": 0.0, "max": 1.0}}, } filled = api.api._validate_config_and_fill_distribution(sweep_config) assert "distribution" in filled["parameters"]["parameter1"] assert "uniform" == filled["parameters"]["parameter1"]["distribution"] sweep_config = { "name": "My Sweep", "method": "random", "parameters": {"parameter1": {"min": 0.0, "max": 1}}, } with pytest.raises(ValueError): api.api._validate_config_and_fill_distribution(sweep_config)
nilq/baby-python
python
import os
import signal
from abc import ABCMeta, abstractmethod
from multiprocessing import Pool

from django.conf import settings
import pymei
import solr

DEFAULT_MIN_GRAM = 2
DEFAULT_MAX_GRAM = 10


class AbstractMEIConverter:
    __metaclass__ = ABCMeta

    TYPE = "cantusdata_music_notation"

    def __init__(self, file_name, siglum_slug, manuscript_id,
                 min_gram=DEFAULT_MIN_GRAM, max_gram=DEFAULT_MAX_GRAM):
        self.file_name = file_name
        self.siglum_slug = siglum_slug
        self.manuscript_id = manuscript_id

        self.min_gram = min_gram
        self.max_gram = max_gram

        self.doc = pymei.documentFromFile(str(file_name), False).getMeiDocument()
        self.page_number = getPageNumber(file_name)

        solrconn = solr.SolrConnection(settings.SOLR_SERVER)
        self.image_uri = getImageURI(file_name, manuscript_id, solrconn)

    @classmethod
    def convert(cls, directory, siglum_slug, id, processes=None, **options):
        mei_files = cls._get_file_list(directory)

        if processes == 0:
            processed = cls._process_in_sequence(mei_files, siglum_slug, id, **options)
        else:
            processed = cls._process_in_parallel(mei_files, siglum_slug, id,
                                                 processes=processes, **options)

        return mei_files, processed

    @classmethod
    def _get_file_list(cls, directory):
        """Generate a list of files to process"""
        mei_files = []

        for root, dirs, files in os.walk(directory):
            # Skip .git directories
            try:
                git_index = dirs.index('.git')
            except ValueError:
                pass
            else:
                del dirs[git_index]

            for f in files:
                if f.startswith("."):
                    continue

                if os.path.splitext(f)[1] == '.mei':
                    mei_files.append(os.path.join(root, f))

        mei_files.sort()
        return mei_files

    @classmethod
    def _process_in_sequence(cls, mei_files, siglum_slug, id, **options):
        for file_name in mei_files:
            ngrams = cls.process_file(file_name, siglum_slug, id, **options)
            yield file_name, ngrams

    @classmethod
    def _process_in_parallel(cls, mei_files, siglum_slug, id, processes, **options):
        pool = Pool(initializer=init_worker, processes=processes)
        args = ((cls, file_name, siglum_slug, id, options) for file_name in mei_files)
        return pool.imap(process_file_in_worker, args)

    @classmethod
    def process_file(cls, file_name, siglum_slug, id, **options):
        inst = cls(file_name, siglum_slug, id, **options)
        return inst.process()

    @abstractmethod
    def process(self):
        raise NotImplementedError('process()')


def init_worker():
    # Allow KeyboardInterrupt to propagate to the parent process
    signal.signal(signal.SIGINT, signal.SIG_IGN)


def process_file_in_worker(params):
    cls, file_name, siglum_slug, id, options = params

    ngrams = list(cls.process_file(file_name, siglum_slug, id, **options))

    return file_name, ngrams


def getPageNumber(ffile):
    """
    Extract the page number from the file name

    :param ffile: path to the MEI file
    :return: page number as a string
    """
    return str(ffile).split('_')[-1].split('.')[0]


def getImageURI(ffile, manuscript_id, solrconn):
    """
    Extract the page number from the file name and get the
    corresponding image URI from Solr

    :param ffile: path to the MEI file
    :param manuscript_id: manuscript the folio belongs to
    :param solrconn: an open Solr connection
    :return: image URI as a string
    """
    # Send the value of the folio name to Solr and get the corresponding URI
    folio_name = getPageNumber(ffile)
    composed_request = u'type:"cantusdata_folio" AND manuscript_id:{0} AND number:{1}' \
        .format(manuscript_id, folio_name)
    result = solrconn.query(composed_request, rows=1, fields=['image_uri'])
    return result.results[0]['image_uri']
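# A minimal subclass sketch, assuming process() is expected to yield one
# record per n-gram extracted from self.doc (the record shape below is an
# assumption for illustration; concrete converters define the real one):
#
#     class NGramMEIConverter(AbstractMEIConverter):
#         def process(self):
#             for n in range(self.min_gram, self.max_gram + 1):
#                 # walk self.doc and yield n-gram records here
#                 yield {"type": self.TYPE, "page": self.page_number}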
nilq/baby-python
python
import sys import atlednolispe_settings # private_password from .base import * DEBUG = False DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': atlednolispe_settings.DATABASE_NAME, 'USER': atlednolispe_settings.USER, 'PASSWORD': atlednolispe_settings.PASSWORD, 'HOST': atlednolispe_settings.HOST, 'PORT': '3306', 'CONN_MAX_AGE': 60, # like connect pool } } THEME_DIR = 'themes' THEME_TYPE = 'html5up' THEME = os.path.join(THEME_DIR, THEME_TYPE) SITE_PACKAGES = [s_p for s_p in sys.path if s_p.endswith('site-packages')][0] TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'templates', THEME), os.path.join(SITE_PACKAGES, 'xadmin/templates'), os.path.join(SITE_PACKAGES, 'crispy_forms/templates'), os.path.join(SITE_PACKAGES, 'reversion/templates'), os.path.join(SITE_PACKAGES, 'ckeditor/templates'), os.path.join(SITE_PACKAGES, 'ckeditor_uploader/templates'), os.path.join(SITE_PACKAGES, 'rest_framework/templates'), ], 'APP_DIRS': False, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], 'libraries': { 'filters': 'templatetags.filters' } }, }, ] STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static'), os.path.join(SITE_PACKAGES, 'rest_framework/static'), ] CACHES = { "default": { "BACKEND": "django_redis.cache.RedisCache", "LOCATION": "redis://127.0.0.1:6379/1", "OPTIONS": { "CLIENT_CLASS": "django_redis.client.DefaultClient", "PARSER_CLASS": "redis.connection.HiredisParser", } } } CKEDITOR_CONFIGS = { 'awesome_ckeditor': { # set the name of the config 'toolbar': 'Full', 'height': 300, # 'width': 1200, 'tabSpaces': 4, }, } DEFAULT_FILE_STORAGE = 'blog.storage.MyStorage' # django-debug-toolbar & silk if DEBUG: TEMPLATES[0]['DIRS'] += [ os.path.join(SITE_PACKAGES, 'debug_toolbar/templates'), os.path.join(SITE_PACKAGES, 'silk/templates'), ] INSTALLED_APPS += [ 'debug_toolbar', 'silk', ] MIDDLEWARE += [ 'debug_toolbar.middleware.DebugToolbarMiddleware', 'silk.middleware.SilkyMiddleware', ] INTERNAL_IPS = ['127.0.0.1'] # debug-toolbar SILKY_PYTHON_PROFILER = True else: ALLOWED_HOSTS = [ # required if DEBUG = False atlednolispe_settings.ALLOWED_HOST1, '127.0.0.1', ]
nilq/baby-python
python
from xml.dom.minidom import Document, parseString from xml.parsers.expat import ExpatError import pytest from sunpy.util import xml def test_xml_to_dict1(): """ should return dict of xml string. """ source_xml = "<outer>\ <inner1>one</inner1>\ <inner2>two</inner2>\ </outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': {u'inner2': u'two', u'inner1': u'one'}} assert xml_dict == expected_dict def test_xml_to_dict2(): """ should return dict of xml string and if a tag is duplicated it takes the last one. """ source_xml = "<outer>\ <inner1>one-one</inner1>\ <inner1>one-two</inner1>\ <inner2>two-one</inner2>\ <inner2>two-two</inner2>\ </outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': {u'inner2': u'two-two', u'inner1': u'one-two'}} assert xml_dict == expected_dict def test_xml_to_dict3(): """ should return dict of xml string with empty value if there are no inner elements. """ source_xml = "<outer/>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': ''} assert xml_dict == expected_dict def test_xml_to_dict4(): """ should return dict of xml string with empty value if there are no inner elements. """ source_xml = "<outer></outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': ''} assert xml_dict == expected_dict def test_xml_to_dict5(): """ should return dict of xml string with 2 layer nesting. """ source_xml = "<outer>\ <mid1>\ <inner1>one-one</inner1>\ </mid1>\ <mid2>\ <inner2>two-one</inner2>\ </mid2>\ </outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': {u'mid2': {u'inner2': u'two-one'}, u'mid1': {u'inner1': u'one-one'}}} assert xml_dict == expected_dict def test_xml_to_dict6(): """ should return dict of xml string with 2 layer nesting and if a tag is duplicated it takes the last one. """ source_xml = "<outer>\ <mid>\ <inner1>one-one</inner1>\ </mid>\ <mid>\ <inner2>two-one</inner2>\ </mid>\ </outer>" xml_dict = xml.xml_to_dict(source_xml) expected_dict = {u'outer': {u'mid': {u'inner2': u'two-one'}}} assert xml_dict == expected_dict def test_xml_to_dict7(): """ should raise TypeError when passed None. """ assert pytest.raises(TypeError, xml.xml_to_dict, None) def test_xml_to_dict8(): """ should raise TypeError when passed non string. """ assert pytest.raises(TypeError, xml.xml_to_dict, 9) def test_xml_to_dict9(): """ should raise ExpatError when passed empty string. """ assert pytest.raises(ExpatError, xml.xml_to_dict, "") def test_xml_to_dict10(): """ should raise ExpatError when passed space. """ assert pytest.raises(ExpatError, xml.xml_to_dict, " ") def test_get_node_text1(): """ should raise NotTextNodeError if there is a non text node. """ doc = Document() outer = doc.createElement("outer") doc.appendChild(outer) pytest.raises(xml.NotTextNodeError, xml.get_node_text, doc) def test_get_node_text2(): """ should return empty string for a node with no child nodes. """ assert xml.get_node_text(Document()) == "" def test_get_node_text3(): """ should return node text. """ node = parseString("<outer>one</outer>") text_node = node.childNodes[0] assert xml.get_node_text(text_node) == "one" def test_get_node_text4(): """ should raise AttributeError when sent None. """ assert pytest.raises(AttributeError, xml.get_node_text, None) def test_get_node_text5(): """ should raise AttributeError when sent wrong type. """ assert pytest.raises(AttributeError, xml.get_node_text, "wrong type") def test_node_to_dict1(): """ should return dict of node. 
""" doc = Document() outer = doc.createElement("outer") doc.appendChild(outer) inner1 = doc.createElement("inner1") inner2 = doc.createElement("inner2") outer.appendChild(inner1) outer.appendChild(inner2) inner1_text = doc.createTextNode("one") inner2_text = doc.createTextNode("two") inner1.appendChild(inner1_text) inner2.appendChild(inner2_text) expected_dict = {'outer': {'inner2': 'two', 'inner1': 'one'}} xml_dict = xml.node_to_dict(doc) assert xml_dict == expected_dict def test_node_to_dict2(): """ should return dict of node double nested. """ doc = Document() outer = doc.createElement("outer") doc.appendChild(outer) mid1 = doc.createElement("mid1") outer.appendChild(mid1) mid2 = doc.createElement("mid2") outer.appendChild(mid2) inner1 = doc.createElement("inner1") inner2 = doc.createElement("inner2") mid1.appendChild(inner1) mid2.appendChild(inner2) inner1_text = doc.createTextNode("one") inner2_text = doc.createTextNode("two") inner1.appendChild(inner1_text) inner2.appendChild(inner2_text) expected_dict = {'outer': {'mid2': {'inner2': 'two'}, 'mid1': {'inner1': 'one'}}} xml_dict = xml.node_to_dict(doc) assert xml_dict == expected_dict def test_node_to_dict3(): """ should return empty dict when sent empty doc. """ expected_dict = {} xml_dict = xml.node_to_dict(Document()) assert xml_dict == expected_dict def test_node_to_dict4(): """ should raise AttributeError when sent wrong type. """ assert pytest.raises(AttributeError, xml.node_to_dict, 9) def test_node_to_dict5(): """ should raise AttributeError when sent None. """ assert pytest.raises(AttributeError, xml.node_to_dict, None) def test_with_multiple_children_in_list(): """ Setting the 'multiple' attribute of parent node should put child nodes in a list. """ def getChild(lst_of_children, key, value): for child in lst_of_children: if child[key] == value: return child raise ValueError("No children with key {0} set to {1} found.".format(key, value)) source = '''<?xml version="1.0" encoding="UTF-8"?> <Config> <Name>With multiple children</Name> <Children multiple="true"> <Child> <Name>First Child</Name> <Value>Value 1</Value> </Child> <Child> <Name>Second Child</Name> <Value>Value 2</Value> </Child> </Children> </Config>''' expected = {'Config': {'Children': [{'Name': 'First Child', 'Value': 'Value 1'}, {'Name': 'Second Child', 'Value': 'Value 2'}], 'Name': 'With multiple children'}} actual = xml.xml_to_dict(source) assert len(expected['Config']) == len(actual['Config']) assert expected['Config']['Name'] == actual['Config']['Name'] assert len(actual['Config']['Children']) == 2 # As the child dictionaries are in lists we cannot be certain what order # they are in. Test individualy. expected_children = expected['Config']['Children'] actual_children = actual['Config']['Children'] expected_first_child = getChild(expected_children, key='Name', value='First Child') actual_first_child = getChild(actual_children, key='Name', value='First Child') assert expected_first_child == actual_first_child expected_second_child = getChild(expected_children, key='Name', value='Second Child') actual_second_child = getChild(actual_children, key='Name', value='Second Child') assert expected_second_child == actual_second_child
nilq/baby-python
python
"""Interface of RLAlgorithm.""" import abc class RLAlgorithm(abc.ABC): """Base class for all the algorithms. Note: If the field sampler_cls exists, it will be by Trainer.setup to initialize a sampler. """ # pylint: disable=too-few-public-methods @abc.abstractmethod def train(self, trainer): """Obtain samplers and start actual training for each epoch. Args: trainer (Trainer): Trainer is passed to give algorithm the access to trainer.step_epochs(), which provides services such as snapshotting and sampler control. """
nilq/baby-python
python
import data import copy, logging import numpy as np def minimize_states_and_actions_to_iterate(): logging.info("Minimizing states and actions to iterate for each engine type...") for engine_subtype in data.engine_subtypes: num_working_engines = data.engines_info[engine_subtype]['NUM_WORKING_ENGINES'] current_state = data.engines_info[engine_subtype]['CURRENT_STATE'][:] if num_working_engines > 3: data.states_by_subtype[engine_subtype] = get_unique_list_of_lists(minimize_states(current_state, num_working_engines))[:] data.actions_by_subtype[engine_subtype] = minimize_actions(current_state, num_working_engines) else: data.states_by_subtype[engine_subtype] = get_unique_list_of_lists(data.all_possible_states[num_working_engines])[:] data.actions_by_subtype[engine_subtype] = data.all_possible_actions[num_working_engines][:] logging.info("The number of states and actions to iterate have been minimized.") def get_unique_list_of_lists(a_list): unique_list_of_lists = [] for l in a_list: if l not in unique_list_of_lists: unique_list_of_lists.append(l) return unique_list_of_lists def minimize_states(current_state, num_working_engines): max_num_engines_currently_at_any_hub = max(current_state) all_states = data.all_possible_states[num_working_engines] states_minimized = [] if max_num_engines_currently_at_any_hub > 1: # If at least one hub currently has more than 1 engine num_hubs_with_max_num_engines = current_state.count(max_num_engines_currently_at_any_hub) if num_hubs_with_max_num_engines > 1: # If more than one hub currently has more than 1 engine indices_of_hubs_with_max_num_engines = [i for i, num in enumerate(current_state) if num == max_num_engines_currently_at_any_hub] indices_of_hubs_with_max_num_engines.sort() for state in all_states: # For every possible state being considered state_to_edit = state[:] num_engines_at_hubs_with_max_num_engines = [] for i in reversed(indices_of_hubs_with_max_num_engines): num_engines_at_hubs_with_max_num_engines.append(state_to_edit.pop(i)) # If at least 1 engine is at each hub with maximum number of engines allowed AND all other hubs have 3 or less engines if all(num >= 1 for num in num_engines_at_hubs_with_max_num_engines) and (max(state_to_edit) <= 3): states_minimized.append(state) else: # If one hub currently has more than 1 engine index_of_hub_with_max_num_engines = current_state.index(max_num_engines_currently_at_any_hub) for state in all_states: # For every possible state being considered state_to_edit = state[:] num_at_hub_with_max_num_engines = state_to_edit.pop(index_of_hub_with_max_num_engines) # If at least 1 engine is at hub with maximum number of engines allowed AND all other hubs have 3 or less engines if (num_at_hub_with_max_num_engines >= 1) and (max(state_to_edit) <= 3): states_minimized.append(state) else: # If there is max 1 engine currently at any hub for state in all_states: if max(state) <= 3: # If no more than 3 engines are at any one hub for the new state states_minimized.append(state) return states_minimized def minimize_actions(current_state, num_working_engines): all_actions = data.all_possible_actions[num_working_engines][:] actions_minimized = [] for action in all_actions: current_state_to_edit = current_state[:] valid = True for engine_from in range(7): for engine_to in range(7): if valid: num_engines_to_move = action[engine_from][engine_to] # If the current index indicates engines are moved from one hub to another if num_engines_to_move > 0: num_engines_at_current_hub = current_state_to_edit[engine_from] # If the number of 
engines at the hub to move engines from is equal to zero if num_engines_at_current_hub == 0: valid = False # The action is not valid # If the number of engines to move from the hub is greater than the number of engines at the hub elif num_engines_to_move > num_engines_at_current_hub: valid = False # The action is not valid else: # Edit the current state to reflect the engines being moved from the hub current_state_to_edit[engine_from] -= num_engines_to_move if valid: actions_minimized.append(action) actions_minimized = np.array(actions_minimized) return actions_minimized def validate_removal_and_engine_info(): for engine_subtype in data.engine_subtypes: assert (engine_subtype in data.aos_cost), "No AOS cost was provided for " + engine_subtype + " in the removal_info file. Please provide ALL info for this engine subtype in the removal_info file." assert (data.aos_cost[engine_subtype] > 0), "AOS cost for " + engine_subtype + " is not set to a positive value. Please provide a positive value indicating the expected AOS cost for this engine type in the removal_info file." assert (engine_subtype in data.engines_info), "No engine data was provided for " + engine_subtype + " in the engine_info file. Please provide ALL info for this engine subtype in the engine_info file." assert (data.engines_info[engine_subtype]['TOTAL_NUM_ENGINES'] <= 5), "The program is limited to running only for engine types with 5 or less total engines. The " + engine_subtype + " has more than 5 engines." total_engines = data.engines_info[engine_subtype]['NUM_WORKING_ENGINES'] + data.engines_info[engine_subtype]['NUM_BROKEN_ENGINES_ATL'] + data.engines_info[engine_subtype]['NUM_BROKEN_ENGINES_MSP'] assert (data.engines_info[engine_subtype]['TOTAL_NUM_ENGINES'] == total_engines), "The total number of engines does not equal the sum of engines working, engines broken at ATL, and engines broken at MSP for the " + engine_subtype + ". Make sure the value in the TOTAL_NUM_ENGINES column is equal to the sum of values in the TOTAL_NUM_WORKING, NUM_BROKEN_ATL, and NUM_BROKEN_MSP columns." assert (data.engines_info[engine_subtype]['NUM_WORKING_ENGINES'] == sum(data.engines_info[engine_subtype]['CURRENT_STATE'])), "The number of working engines does not equal the sum of engines currently at each hub for the " + engine_subtype + ". Make sure the value in the TOTAL_NUM_WORKING column is equal to the sum of values in the NUM_WORKING columns for each hub." def validate_engine_subtype_data(): pass
nilq/baby-python
python
import csv, sys

if len(sys.argv) != 6:
    print("format: python3 join_csv.py OUT-FILE FILE-1 KEY-INDEX-1 FILE-2 KEY-INDEX-2")
    sys.exit(1)

key1 = int(sys.argv[3])
key2 = int(sys.argv[5])

# Read the second file once so it can be re-scanned for every row of the first.
with open(sys.argv[4], newline='') as file2:
    rows2 = list(csv.reader(file2, delimiter=",", quotechar='"'))

with open(sys.argv[2], newline='') as file1, \
        open(sys.argv[1], 'w', newline='') as outfile:
    reader = csv.reader(file1, delimiter=",", quotechar='"')
    writer = csv.writer(outfile, delimiter=',', quotechar='"',
                        quoting=csv.QUOTE_MINIMAL)
    for a in reader:
        for aa in rows2:
            if a[key1] == aa[key2]:
                # Join: emit the whole left row plus the right row's
                # columns after its key column.
                writer.writerow(a + aa[key2 + 1:])
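# Example invocation (hypothetical file names), joining on column 0 of both files:
#
#     python3 join_csv.py joined.csv left.csv 0 right.csv 0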
nilq/baby-python
python
#!/usr/local/bin/python3

import boto3
from botocore.client import Config
from botocore.vendored.requests.exceptions import ReadTimeout
import traceback
import json

from run_cumulus_task import run_cumulus_task
import requests, zipfile, io

client = boto3.client('stepfunctions', region_name='us-east-1')
lambda_client = boto3.client('lambda', region_name='us-east-1')


def handler(function, event, context):
    """handler that is provided to aws lambda"""
    return run_cumulus_task(function, event, context, {})


def get_lambda_function(lambda_arn):
    lambda_function = lambda_client.get_function(FunctionName=lambda_arn)
    lambda_code_url = lambda_function['Code']['Location']
    r = requests.get(lambda_code_url)
    z = zipfile.ZipFile(io.BytesIO(r.content))
    z.extractall('.')
    module_str, function_str = lambda_function['Configuration']['Handler'].split('.')
    task = __import__(module_str)
    return getattr(task, function_str)


def step_function_handler(handler, activity_arn, lambda_arn):
    """
    This function polls AWS Step Functions for new activities
    and runs the process function for each message in activities
    """
    print('ics querying for task from %s' % activity_arn)

    # poll for new activities
    try:
        response = client.get_activity_task(activityArn=activity_arn)
        print('Received an activity. Processing it')
    except ReadTimeout:
        return
    except Exception as e:
        print('Activity Read error (%s). Trying again.' % str(e))
        return

    task_token = response.get('taskToken', None)
    output = None

    if task_token:
        try:
            function = get_lambda_function(lambda_arn)
            input = json.loads(response.get('input', '{}'))
            output = json.dumps(handler(function=function, event=input, context={}))
            return client.send_task_success(taskToken=task_token, output=output)
        except Exception as e:
            err = str(e)
            print("Exception when running task: %s" % err)
            tb = traceback.format_exc()
            # Truncate long error messages so they fit the Step Functions limit.
            err = (err[:252] + ' ...') if len(err) > 252 else err
            client.send_task_failure(taskToken=task_token, error=err, cause=tb)
    else:
        print('No activity found')


def poll(activity_arn, lambda_arn):
    config = Config(read_timeout=70)
    print('outside of the loop')

    # loop forever
    while True:
        step_function_handler(handler, activity_arn, lambda_arn)
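# A sketch of starting the poller; both ARNs below are placeholders, not
# real resources:
#
#     poll(activity_arn='arn:aws:states:us-east-1:123456789012:activity:MyActivity',
#          lambda_arn='arn:aws:lambda:us-east-1:123456789012:function:MyTask')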
nilq/baby-python
python
#####################################################################
#                                                                   #
#  SkillsFuture IBM Cloud Function Example 2                        #
#  This example is used to show how to get data from Discovery     #
#  and return it to Watson Assistant.                               #
#                                                                   #
#  input JSON: { "text": "What is Barn Town?"}                      #
#                                                                   #
#  WL IBM - 17 July 2019                                            #
#                                                                   #
#####################################################################

try:
    from ibm_cloud import DiscoveryV1
except ImportError:
    from watson_developer_cloud import DiscoveryV1


def MakeReturnMessage(results):
    messageback = "Here are some answers from search:<br>\n"
    counter = 0
    for aresult in results:
        counter = counter + 1
        messageback = messageback + "<b>" + str(counter) + "</b> " + aresult["text"] + "<br>\n"
    return messageback


def main(dict):
    #create defaults for our variable
    text = ""

    #first, lets deconstruct the input variable
    if "text" in dict:
        text = dict["text"]

    #then create the discovery object, please choose the right version.
    discovery = ""
    if 'username' in dict:
        discovery = DiscoveryV1(version=dict['version'],
                                url=dict['url'],
                                username=dict['username'],
                                password=dict['password'])
    elif 'iam_apikey' in dict:
        discovery = DiscoveryV1(version=dict['version'],
                                url=dict['url'],
                                iam_apikey=dict['iam_apikey']
                                )
    else:
        return { 'text': 'Error: Disc. Creds not specified!' }

    #query discovery
    get_disc = discovery.query(dict['envid'],
                               dict['colid'],
                               natural_language_query=text,
                               count=3)

    #get results
    get_results = get_disc.get_result()['results']

    #make the output message
    messageback = ""
    if len(get_results) > 0:
        messageback = MakeReturnMessage(get_results)
    else:
        messageback = "I am sorry, there are no results from search. Please try another question."

    #craft the output
    result = {"text": messageback}
    return result
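# A minimal invocation sketch; every value below is a placeholder, and the
# Discovery version string is an assumption:
#
#     main({'text': 'What is Barn Town?',
#           'version': '2019-04-30',
#           'url': 'https://gateway.watsonplatform.net/discovery/api',
#           'iam_apikey': '<api-key>',
#           'envid': '<environment-id>',
#           'colid': '<collection-id>'})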
nilq/baby-python
python
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This module contains tests for the logical behaviour of the tac negotiation skill.""" import copy from pathlib import Path from unittest.mock import patch from aea.decision_maker.gop import GoalPursuitReadiness, OwnershipState, Preferences from aea.helpers.preference_representations.base import ( linear_utility, logarithmic_utility, ) from aea.test_tools.test_skill import BaseSkillTestCase from packages.fetchai.skills.tac_control.helpers import ( determine_scaling_factor, generate_utility_params, ) from packages.fetchai.skills.tac_negotiation.dialogues import FipaDialogue from packages.fetchai.skills.tac_negotiation.strategy import Strategy from tests.conftest import ROOT_DIR class TestLogical(BaseSkillTestCase): """Logical Tests for tac negotiation.""" path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "tac_negotiation") @classmethod def setup(cls): """Setup the test class.""" tac_dm_context_kwargs = { "goal_pursuit_readiness": GoalPursuitReadiness(), "ownership_state": OwnershipState(), "preferences": Preferences(), } super().setup(dm_context_kwargs=tac_dm_context_kwargs) cls.register_as = "both" cls.search_for = "both" cls.is_contract_tx = False cls.ledger_id = "some_ledger_id" cls.location = {"longitude": 0.1270, "latitude": 51.5194} cls.search_radius = 5.0 cls.service_key = "tac_service" cls.strategy = Strategy( register_as=cls.register_as, search_for=cls.search_for, is_contract_tx=cls.is_contract_tx, ledger_id=cls.ledger_id, location=cls.location, service_key=cls.service_key, search_radius=cls.search_radius, name="strategy", skill_context=cls._skill.skill_context, ) cls.sender = "some_sender_address" cls.counterparty = "some_counterparty_address" cls.mocked_currency_id = "12" cls.mocked_currency_amount = 2000000 cls.mocked_amount_by_currency_id = { cls.mocked_currency_id: cls.mocked_currency_amount } cls.mocked_good_ids = ["13", "14", "15", "16", "17", "18", "19", "20", "21"] cls.mocked_good_quantities = [5, 7, 4, 3, 5, 4, 3, 5, 6] cls.mocked_quantities_by_good_id = dict( zip(cls.mocked_good_ids, cls.mocked_good_quantities) ) cls.mocked_ownership_state = ( cls._skill.skill_context.decision_maker_handler_context.ownership_state ) cls.mocked_ownership_state.set( cls.mocked_amount_by_currency_id, cls.mocked_quantities_by_good_id ) cls.exchange_params_by_currency_id = {cls.mocked_currency_id: 1.0} cls.utility_params_by_good_id = generate_utility_params( [cls._skill.skill_context.agent_address], cls.mocked_good_ids, determine_scaling_factor(cls.mocked_currency_amount), )[cls._skill.skill_context.agent_address] cls.mocked_preferences = ( cls._skill.skill_context.decision_maker_handler_context.preferences ) cls.mocked_preferences.set( exchange_params_by_currency_id=cls.exchange_params_by_currency_id, 
utility_params_by_good_id=cls.utility_params_by_good_id, ) @staticmethod def _calculate_score(preferences, ownership_state): """Calculate the score given a preferences and an ownership_state object.""" goods_score = logarithmic_utility( preferences.utility_params_by_good_id, ownership_state.quantities_by_good_id, ) money_score = linear_utility( preferences.exchange_params_by_currency_id, ownership_state.amount_by_currency_id, ) return goods_score + money_score def test_generated_proposals_increase_score_seller(self): """Test whether the proposals generated by _generate_candidate_proposals method of the Strategy class actually increases agent's score where role is seller.""" # setup is_searching_for_sellers = True # operation with patch.object( self.skill.skill_context.transactions, "ownership_state_after_locks", return_value=self.mocked_ownership_state, ) as mock_ownership: actual_proposals = self.strategy._generate_candidate_proposals( is_searching_for_sellers ) # after mock_ownership.assert_any_call(is_seller=is_searching_for_sellers) current_score = self._calculate_score( self.mocked_preferences, self.mocked_ownership_state ) for proposal in actual_proposals: # applying proposal on a new ownership_state terms = self.strategy.terms_from_proposal( proposal, self.sender, self.counterparty, FipaDialogue.Role.SELLER ) new_ownership_state = copy.copy(self.mocked_ownership_state) new_ownership_state.apply_delta( terms.amount_by_currency_id, terms.quantities_by_good_id ) # new score new_score = self._calculate_score( self.mocked_preferences, new_ownership_state ) assert new_score >= current_score def test_generated_proposals_increase_score_buyer(self): """Test whether the proposals generated by _generate_candidate_proposals method of the Strategy class actually increases agent's score where role is buyer.""" # setup is_searching_for_sellers = False # operation with patch.object( self.skill.skill_context.transactions, "ownership_state_after_locks", return_value=self.mocked_ownership_state, ) as mock_ownership: actual_proposals = self.strategy._generate_candidate_proposals( is_searching_for_sellers ) # after mock_ownership.assert_any_call(is_seller=is_searching_for_sellers) current_score = self._calculate_score( self.mocked_preferences, self.mocked_ownership_state ) for proposal in actual_proposals: # applying proposal on a new ownership_state terms = self.strategy.terms_from_proposal( proposal, self.sender, self.counterparty, FipaDialogue.Role.BUYER ) new_ownership_state = copy.copy(self.mocked_ownership_state) new_ownership_state.apply_delta( terms.amount_by_currency_id, terms.quantities_by_good_id ) # new score new_score = self._calculate_score( self.mocked_preferences, new_ownership_state ) assert new_score >= current_score
nilq/baby-python
python
import requests
import shutil
import os


def writeToFile(directory, filename, filecontent):
    if directory:
        # Create the target directory if it does not exist yet.
        os.makedirs(directory, exist_ok=True)
    else:
        directory = ""
    with open(os.path.join(directory, filename), 'wb') as f:
        filecontent.raw.decode_content = True
        shutil.copyfileobj(filecontent.raw, f)


directory = "data"

instituicoesURL = "https://sisu-api.apps.mec.gov.br/api/v1/oferta/instituicoes"
response = requests.get(instituicoesURL).json()
instituicoes = [r["co_ies"] for r in response]

baseURL = "https://sisu.mec.gov.br/static/listagem-alunos-aprovados-portal/"
baseFilename = "listagem-alunos-aprovados-ies-{}-{}.csv"

for i, instituicao in enumerate(instituicoes):
    termoAdesaoURL = "https://sisu-api.apps.mec.gov.br/api/v1/oferta/instituicao/{}".format(instituicao)
    response = requests.get(termoAdesaoURL).json()
    termoAdesao = response["0"]["co_termo_adesao"]

    filename = baseFilename.format(instituicao, termoAdesao)
    url = baseURL + filename

    file = requests.get(url, stream=True)
    if file.status_code != 200:
        print("[{}/{}] [ERROR {}] {}".format(i+1, len(instituicoes), file.status_code, filename))
    else:
        writeToFile(directory, filename, file)
        print("[{}/{}] Saved to '{}'".format(i+1, len(instituicoes), filename))
nilq/baby-python
python
NAMES = ["cmd_insensetive"] ANSWER = "You used `cmd_insensetive` command!"
nilq/baby-python
python
# # PySNMP MIB module AC-LAG-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AC-LAG-MIB # Produced by pysmi-0.3.4 at Wed May 1 11:09:20 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # AcSlotNumber, acPport, AcPortNumber, AcOpStatus, AcNodeId, AcAdminStatus = mibBuilder.importSymbols("APPIAN-SMI-MIB", "AcSlotNumber", "acPport", "AcPortNumber", "AcOpStatus", "AcNodeId", "AcAdminStatus") OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion") NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup") ObjectIdentity, Integer32, ModuleIdentity, NotificationType, Gauge32, Unsigned32, Counter32, Bits, IpAddress, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, iso, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "ModuleIdentity", "NotificationType", "Gauge32", "Unsigned32", "Counter32", "Bits", "IpAddress", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "iso", "MibIdentifier") TextualConvention, MacAddress, TruthValue, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "MacAddress", "TruthValue", "DisplayString") acLagMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8)) if mibBuilder.loadTexts: acLagMIB.setLastUpdated('0002231600Z') if mibBuilder.loadTexts: acLagMIB.setOrganization('Appian Communications, Inc.') if mibBuilder.loadTexts: acLagMIB.setContactInfo(' David Ward') if mibBuilder.loadTexts: acLagMIB.setDescription('The Appian Communications Link Aggregation module for managing IEEE Std 802.3ad.') lagMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1)) class LacpKey(TextualConvention, Integer32): description = 'The Actor or Partner Key value.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535) class LacpState(TextualConvention, Bits): description = 'The Actor and Partner State values from the LACPDU.' status = 'current' namedValues = NamedValues(("lacpActivity", 0), ("lacpTimeout", 1), ("aggregation", 2), ("synchronization", 3), ("collecting", 4), ("distributing", 5), ("defaulted", 6), ("expired", 7)) class ChurnState(TextualConvention, Integer32): description = 'The state of the Churn Detection machine.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3)) namedValues = NamedValues(("noChurn", 1), ("churn", 2), ("churnMonitor", 3)) class PortList(TextualConvention, OctetString): description = "Each octet within this value specifies a set of eight ports, with the first octet specifying ports 1 through 8, the second octet specifying ports 9 through 16, etc. Within each octet, the most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port. Thus, each port of the bridge is represented by a single bit within the value of this object. 
If that bit has a value of '1' then that port is included in the set of ports; the port is not included if its bit has a value of '0'."
    status = 'current'

class AcAggInstanceIndex(TextualConvention, Integer32):
    description = 'An instance of an aggregation group within this OSAP which is within the range of (1..64).'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 64)

class AcAggInstanceValue(TextualConvention, Integer32):
    description = 'An instance of an aggregation group within this OSAP which is within the range of (1..64). A value of zero indicates the aggregator instance has not been determined.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 64)

acDot3adAgg = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1))
acDot3adAggPort = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2))
acDot3adTablesLastChanged = MibScalar((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adTablesLastChanged.setStatus('current')
if mibBuilder.loadTexts: acDot3adTablesLastChanged.setDescription('This object indicates the time of the most recent change to the acDot3adAggTable, acDot3adAggPortListTable, or acDot3adAggPortTable.')
acDot3adAggTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1), )
if mibBuilder.loadTexts: acDot3adAggTable.setReference('IEEE 802.3 Subclause 30.7.1')
if mibBuilder.loadTexts: acDot3adAggTable.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggTable.setDescription('A table that contains information about every Aggregator that is associated with this System.')
acDot3adAggEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggInstanceIndex"))
if mibBuilder.loadTexts: acDot3adAggEntry.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggEntry.setDescription('A list of the Aggregator parameters. This is indexed by OSAP node ID - supporting one Aggregator per OSAP.')
acDot3adAggNodeIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 1), AcNodeId())
if mibBuilder.loadTexts: acDot3adAggNodeIdIndex.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggNodeIdIndex.setDescription('The node id is the id for this specific node in the OSAP ring.')
acDot3adAggInstanceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 2), AcAggInstanceIndex())
if mibBuilder.loadTexts: acDot3adAggInstanceIndex.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggInstanceIndex.setDescription('The instance of this aggregator within this OSAP.')
acDot3adAggMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggMACAddress.setReference('IEEE 802.3 Subclause 30.7.1.1.9')
if mibBuilder.loadTexts: acDot3adAggMACAddress.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggMACAddress.setDescription('A 6-octet read-only value carrying the individual MAC address assigned to the Aggregator.')
acDot3adAggActorSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggActorSystemPriority.setReference('IEEE 802.3 Subclause 30.7.1.1.5')
if mibBuilder.loadTexts: acDot3adAggActorSystemPriority.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggActorSystemPriority.setDescription("A 2-octet read-write value indicating the priority value associated with the Actor's System ID.")
acDot3adAggActorSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 5), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggActorSystemID.setReference('IEEE 802.3 Subclause 30.7.1.1.4')
if mibBuilder.loadTexts: acDot3adAggActorSystemID.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggActorSystemID.setDescription("A 6-octet read-write MAC address value used as a unique identifier for the System that contains this Aggregator. NOTE-From the perspective of the Link Aggregation mechanisms described in Clause 43, only a single combination of Actor's System ID and System Priority are considered, and no distinction is made between the values of these parameters for an Aggregator and the port(s) that are associated with it; i.e., the protocol is described in terms of the operation of aggregation within a single System. However, the managed objects provided for the Aggregator and the port both allow management of these parameters. The result of this is to permit a single piece of equipment to be configured by management to contain more than one System from the point of view of the operation of Link Aggregation. This may be of particular use in the configuration of equipment that has limited aggregation capability (see 43.6).")
acDot3adAggAggregateOrIndividual = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggAggregateOrIndividual.setReference('IEEE 802.3 Subclause 30.7.1.1.6')
if mibBuilder.loadTexts: acDot3adAggAggregateOrIndividual.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggAggregateOrIndividual.setDescription("A read-only Boolean value indicating whether the Aggregator represents an Aggregate (`TRUE') or an Individual link (`FALSE').")
acDot3adAggActorAdminKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 7), LacpKey()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggActorAdminKey.setReference('IEEE 802.3 Subclause 30.7.1.1.7')
if mibBuilder.loadTexts: acDot3adAggActorAdminKey.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggActorAdminKey.setDescription('The current administrative value of the Key for the Aggregator. The administrative Key value may differ from the operational Key value for the reasons discussed in 43.6.2. This is a 16-bit, read-write value. The meaning of particular Key values is of local significance.')
acDot3adAggActorOperKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 8), LacpKey()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggActorOperKey.setReference('IEEE 802.3 Subclause 30.7.1.1.8')
if mibBuilder.loadTexts: acDot3adAggActorOperKey.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggActorOperKey.setDescription('The current operational value of the Key for the Aggregator. The administrative Key value may differ from the operational Key value for the reasons discussed in 43.6.2. This is a 16-bit read-only value. The meaning of particular Key values is of local significance.')
acDot3adAggPartnerSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 9), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPartnerSystemID.setReference('IEEE 802.3 Subclause 30.7.1.1.10')
if mibBuilder.loadTexts: acDot3adAggPartnerSystemID.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPartnerSystemID.setDescription('A 6-octet read-only MAC address value consisting of the unique identifier for the current protocol Partner of this Aggregator. A value of zero indicates that there is no known Partner. If the aggregation is manually configured, this System ID value will be a value assigned by the local System.')
acDot3adAggPartnerSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPartnerSystemPriority.setReference('IEEE 802.3 Subclause 30.7.1.1.11')
if mibBuilder.loadTexts: acDot3adAggPartnerSystemPriority.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPartnerSystemPriority.setDescription("A 2-octet read-only value that indicates the priority value associated with the Partner's System ID. If the aggregation is manually configured, this System Priority value will be a value assigned by the local System.")
acDot3adAggPartnerOperKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 11), LacpKey()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPartnerOperKey.setReference('IEEE 802.3 Subclause 30.7.1.1.12')
if mibBuilder.loadTexts: acDot3adAggPartnerOperKey.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPartnerOperKey.setDescription("The current operational value of the Key for the Aggregator's current protocol Partner. This is a 16-bit read-only value. If the aggregation is manually configured, this Key value will be a value assigned by the local System.")
acDot3adAggCollectorMaxDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggCollectorMaxDelay.setReference('IEEE 802.3 Subclause 30.7.1.1.32')
if mibBuilder.loadTexts: acDot3adAggCollectorMaxDelay.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggCollectorMaxDelay.setDescription('The value of this 16-bit read-write attribute defines the maximum delay, in tens of microseconds, that may be imposed by the Frame Collector between receiving a frame from an Aggregator Parser, and either delivering the frame to its MAC Client or discarding the frame (see 43.2.3.1.1).')
acDot3adAggPortListTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 2), )
if mibBuilder.loadTexts: acDot3adAggPortListTable.setReference('IEEE 802.3 Subclause 30.7.1.1.30')
if mibBuilder.loadTexts: acDot3adAggPortListTable.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortListTable.setDescription('A table that contains a list of all the ports associated with each Aggregator.')
acDot3adAggPortListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 2, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggInstanceIndex"))
if mibBuilder.loadTexts: acDot3adAggPortListEntry.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortListEntry.setDescription('A list of the ports associated with a given Aggregator. This is indexed by OSAP node ID - supporting one Aggregator per OSAP.')
acDot3adAggPortListPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 1, 2, 1, 1), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortListPorts.setReference('IEEE 802.3 Subclause 30.7.1.1.30')
if mibBuilder.loadTexts: acDot3adAggPortListPorts.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortListPorts.setDescription('The complete set of ports currently associated with this Aggregator. Each bit set in this list represents an Actor Port member of this Link Aggregation.')
acDot3adAggPortTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1), )
if mibBuilder.loadTexts: acDot3adAggPortTable.setReference('IEEE 802.3 Subclause 30.7.2')
if mibBuilder.loadTexts: acDot3adAggPortTable.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortTable.setDescription('A table that contains Link Aggregation Control configuration information about every Aggregation Port associated with this device. A row appears in this table for each physical port.')
acDot3adAggPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggPortNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortSlotIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortPortIndex"))
if mibBuilder.loadTexts: acDot3adAggPortEntry.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortEntry.setDescription('A list of Link Aggregation Control configuration parameters for each Aggregation Port on this device.')
acDot3adAggPortNodeIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 1), AcNodeId())
if mibBuilder.loadTexts: acDot3adAggPortNodeIdIndex.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortNodeIdIndex.setDescription('The node id is the id for this specific node in the OSAP ring.')
acDot3adAggPortSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 2), AcSlotNumber())
if mibBuilder.loadTexts: acDot3adAggPortSlotIndex.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortSlotIndex.setDescription('The slot number within the chassis where this module entry resides.')
acDot3adAggPortPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 3), AcPortNumber())
if mibBuilder.loadTexts: acDot3adAggPortPortIndex.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPortIndex.setDescription('The port number on the module which represents this instance of an Ethernet access port.')
acDot3adAggPortActorSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortActorSystemPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.2')
if mibBuilder.loadTexts: acDot3adAggPortActorSystemPriority.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortActorSystemPriority.setDescription("A 2-octet read-write value used to define the priority value associated with the Actor's System ID.")
acDot3adAggPortActorSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 5), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortActorSystemID.setReference('IEEE 802.3 Subclause 30.7.2.1.3')
if mibBuilder.loadTexts: acDot3adAggPortActorSystemID.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortActorSystemID.setDescription('A 6-octet read-only MAC address value that defines the value of the System ID for the System that contains this Aggregation Port.')
acDot3adAggPortActorAdminKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 6), LacpKey()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortActorAdminKey.setReference('IEEE 802.3 Subclause 30.7.2.1.4')
if mibBuilder.loadTexts: acDot3adAggPortActorAdminKey.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortActorAdminKey.setDescription('The current administrative value of the Key for the Aggregation Port. This is a 16-bit read-write value. The meaning of particular Key values is of local significance.')
acDot3adAggPortActorOperKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 7), LacpKey()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortActorOperKey.setReference('IEEE 802.3 Subclause 30.7.2.1.5')
if mibBuilder.loadTexts: acDot3adAggPortActorOperKey.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortActorOperKey.setDescription('The current operational value of the Key for the Aggregation Port. This is a 16-bit read-only value. The meaning of particular Key values is of local significance.')
acDot3adAggPortPartnerAdminSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.6')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemPriority.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemPriority.setDescription("A 2-octet read-write value used to define the administrative value of priority associated with the Partner's System ID. The assigned value is used, along with the value of aAggPortPartnerAdminSystemID, aAggPortPartnerAdminKey, aAggPortPartnerAdminPort, and aAggPortPartnerAdminPortPriority, in order to achieve manually configured aggregation.")
acDot3adAggPortPartnerOperSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.7')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemPriority.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemPriority.setDescription("A 2-octet read-only value indicating the operational value of priority associated with the Partner's System ID. The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminSystemPriority if there is no protocol Partner.")
acDot3adAggPortPartnerAdminSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 10), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemID.setReference('IEEE 802.3 Subclause 30.7.2.1.8')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemID.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminSystemID.setDescription("A 6-octet read-write MACAddress value representing the administrative value of the Aggregation Port's protocol Partner's System ID. The assigned value is used, along with the value of aAggPortPartnerAdminSystemPriority, aAggPortPartnerAdminKey, aAggPortPartnerAdminPort, and aAggPortPartnerAdminPortPriority, in order to achieve manually configured aggregation.")
acDot3adAggPortPartnerOperSystemID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 11), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemID.setReference('IEEE 802.3 Subclause 30.7.2.1.9')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemID.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperSystemID.setDescription("A 6-octet read-only MACAddress value representing the current value of the Aggregation Port's protocol Partner's System ID. A value of zero indicates that there is no known protocol Partner. The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminSystemID if there is no protocol Partner.")
acDot3adAggPortPartnerAdminKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 12), LacpKey()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminKey.setReference('IEEE 802.3 Subclause 30.7.2.1.10')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminKey.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminKey.setDescription('The current administrative value of the Key for the protocol Partner. This is a 16-bit read-write value. The assigned value is used, along with the value of aAggPortPartnerAdminSystemPriority, aAggPortPartnerAdminSystemID, aAggPortPartnerAdminPort, and aAggPortPartnerAdminPortPriority, in order to achieve manually configured aggregation.')
acDot3adAggPortPartnerOperKey = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 13), LacpKey()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperKey.setReference('IEEE 802.3 Subclause 30.7.2.1.11')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperKey.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperKey.setDescription('The current operational value of the Key for the protocol Partner. The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminKey if there is no protocol Partner. This is a 16-bit read-only value.')
acDot3adAggPortSelectedAggID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 14), AcAggInstanceValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortSelectedAggID.setReference('IEEE 802.3 Subclause 30.7.2.1.12')
if mibBuilder.loadTexts: acDot3adAggPortSelectedAggID.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortSelectedAggID.setDescription('The identifier value of the Aggregator that this Aggregation Port has currently selected. Zero indicates that the Aggregation Port has not selected an Aggregator, either because it is in the process of detaching from an Aggregator or because there is no suitable Aggregator available for it to select. This value is read-only.')
acDot3adAggPortAttachedAggID = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 15), AcAggInstanceValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortAttachedAggID.setReference('IEEE 802.3 Subclause 30.7.2.1.13')
if mibBuilder.loadTexts: acDot3adAggPortAttachedAggID.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortAttachedAggID.setDescription('The identifier value of the Aggregator that this Aggregation Port is currently attached to. Zero indicates that the Aggregation Port is not currently attached to an Aggregator. This value is read-only.')
acDot3adAggPortActorPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortActorPort.setReference('IEEE 802.3 Subclause 30.7.2.1.14')
if mibBuilder.loadTexts: acDot3adAggPortActorPort.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortActorPort.setDescription('The port number locally assigned to the Aggregation Port. The port number is communicated in LACPDUs as the Actor_Port. This value is read-only.')
acDot3adAggPortActorPortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortActorPortPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.15')
if mibBuilder.loadTexts: acDot3adAggPortActorPortPriority.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortActorPortPriority.setDescription('The priority value assigned to this Aggregation Port. This 16-bit value is read-write.')
acDot3adAggPortPartnerAdminPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPort.setReference('IEEE 802.3 Subclause 30.7.2.1.16')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPort.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPort.setDescription('The current administrative value of the port number for the protocol Partner. This is a 16-bit read-write value. The assigned value is used, along with the value of aAggPortPartnerAdminSystemPriority, aAggPortPartnerAdminSystemID, aAggPortPartnerAdminKey, and aAggPortPartnerAdminPortPriority, in order to achieve manually configured aggregation.')
acDot3adAggPortPartnerOperPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPort.setReference('IEEE 802.3 Subclause 30.7.2.1.17')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPort.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPort.setDescription("The operational port number assigned to this Aggregation Port by the Aggregation Port's protocol Partner. The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminPort if there is no protocol Partner. This 16-bit value is read-only.")
acDot3adAggPortPartnerAdminPortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPortPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.18')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPortPriority.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminPortPriority.setDescription('The current administrative value of the port priority for the protocol Partner. This is a 16-bit read-write value. The assigned value is used, along with the value of aAggPortPartnerAdminSystemPriority, aAggPortPartnerAdminSystemID, aAggPortPartnerAdminKey, and aAggPortPartnerAdminPort, in order to achieve manually configured aggregation.')
acDot3adAggPortPartnerOperPortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPortPriority.setReference('IEEE 802.3 Subclause 30.7.2.1.19')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPortPriority.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperPortPriority.setDescription('The priority value assigned to this Aggregation Port by the Partner. The value of this attribute may contain the manually configured value carried in aAggPortPartnerAdminPortPriority if there is no protocol Partner. This 16-bit value is read-only.')
acDot3adAggPortActorAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 22), LacpState()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortActorAdminState.setReference('IEEE 802.3 Subclause 30.7.2.1.20')
if mibBuilder.loadTexts: acDot3adAggPortActorAdminState.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortActorAdminState.setDescription('A string of 8 bits, corresponding to the administrative values of Actor_State (43.4.2) as transmitted by the Actor in LACPDUs. The first bit corresponds to bit 0 of Actor_State (LACP_Activity), the second bit corresponds to bit 1 (LACP_Timeout), the third bit corresponds to bit 2 (Aggregation), the fourth bit corresponds to bit 3 (Synchronization), the fifth bit corresponds to bit 4 (Collecting), the sixth bit corresponds to bit 5 (Distributing), the seventh bit corresponds to bit 6 (Defaulted), and the eighth bit corresponds to bit 7 (Expired). These values allow administrative control over the values of LACP_Activity, LACP_Timeout and Aggregation. This attribute value is read-write.')
acDot3adAggPortActorOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 23), LacpState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortActorOperState.setReference('IEEE 802.3 Subclause 30.7.2.1.21')
if mibBuilder.loadTexts: acDot3adAggPortActorOperState.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortActorOperState.setDescription('A string of 8 bits, corresponding to the current operational values of Actor_State as transmitted by the Actor in LACPDUs. The bit allocations are as defined in 30.7.2.1.20. This attribute value is read-only.')
acDot3adAggPortPartnerAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 24), LacpState()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminState.setReference('IEEE 802.3 Subclause 30.7.2.1.22')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminState.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerAdminState.setDescription('A string of 8 bits, corresponding to the current administrative value of Actor_State for the protocol Partner. The bit allocations are as defined in 30.7.2.1.20. This attribute value is read-write. The assigned value is used in order to achieve manually configured aggregation.')
acDot3adAggPortPartnerOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 25), LacpState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperState.setReference('IEEE 802.3 Subclause 30.7.2.1.23')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperState.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortPartnerOperState.setDescription('A string of 8 bits, corresponding to the current values of Actor_State in the most recently received LACPDU transmitted by the protocol Partner. The bit allocations are as defined in 30.7.2.1.20. In the absence of an active protocol Partner, this value may reflect the manually configured value aAggPortPartnerAdminState. This attribute value is read-only.')
acDot3adAggPortAggregateOrIndividual = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 1, 1, 26), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortAggregateOrIndividual.setReference('IEEE 802.3 Subclause 30.7.2.1.24')
if mibBuilder.loadTexts: acDot3adAggPortAggregateOrIndividual.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortAggregateOrIndividual.setDescription("A read-only Boolean value indicating whether the Aggregation Port is able to Aggregate (`TRUE') or is only able to operate as an Individual link (`FALSE').")
acDot3adAggPortStatsTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2), )
if mibBuilder.loadTexts: acDot3adAggPortStatsTable.setReference('IEEE 802.3 Subclause 30.7.3')
if mibBuilder.loadTexts: acDot3adAggPortStatsTable.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsTable.setDescription('A table that contains Link Aggregation information about every port that is associated with this device. A row appears in this table for each physical port.')
acDot3adAggPortStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggPortNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortSlotIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortPortIndex"))
if mibBuilder.loadTexts: acDot3adAggPortStatsEntry.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsEntry.setDescription('A list of Link Aggregation Control Protocol statistics for each port on this device.')
acDot3adAggPortStatsLACPDUsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsRx.setReference('IEEE 802.3 Subclause 30.7.3.1.2')
if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsRx.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsRx.setDescription('The number of valid LACPDUs received on this Aggregation Port. This value is read-only.')
acDot3adAggPortStatsMarkerPDUsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsRx.setReference('IEEE 802.3 Subclause 30.7.3.1.3')
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsRx.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsRx.setDescription('The number of valid Marker PDUs received on this Aggregation Port. This value is read-only.')
acDot3adAggPortStatsMarkerResponsePDUsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsRx.setReference('IEEE 802.3 Subclause 30.7.3.1.4')
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsRx.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsRx.setDescription('The number of valid Marker Response PDUs received on this Aggregation Port. This value is read-only.')
acDot3adAggPortStatsUnknownRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortStatsUnknownRx.setReference('IEEE 802.3 Subclause 30.7.3.1.5')
if mibBuilder.loadTexts: acDot3adAggPortStatsUnknownRx.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsUnknownRx.setDescription('The number of frames received that either: - carry the Slow Protocols Ethernet Type value (43B.4), but contain an unknown PDU, or: - are addressed to the Slow Protocols group MAC Address (43B.3), but do not carry the Slow Protocols Ethernet Type. This value is read-only.')
acDot3adAggPortStatsIllegalRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortStatsIllegalRx.setReference('IEEE 802.3 Subclause 30.7.3.1.6')
if mibBuilder.loadTexts: acDot3adAggPortStatsIllegalRx.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsIllegalRx.setDescription('The number of frames received that carry the Slow Protocols Ethernet Type value (43B.4), but contain a badly formed PDU or an illegal value of Protocol Subtype (43B.4). This value is read-only.')
acDot3adAggPortStatsLACPDUsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsTx.setReference('IEEE 802.3 Subclause 30.7.3.1.7')
if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsTx.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsLACPDUsTx.setDescription('The number of LACPDUs transmitted on this Aggregation Port. This value is read-only.')
acDot3adAggPortStatsMarkerPDUsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsTx.setReference('IEEE 802.3 Subclause 30.7.3.1.8')
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsTx.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerPDUsTx.setDescription('The number of Marker PDUs transmitted on this Aggregation Port. This value is read-only.')
acDot3adAggPortStatsMarkerResponsePDUsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsTx.setReference('IEEE 802.3 Subclause 30.7.3.1.9')
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsTx.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsMarkerResponsePDUsTx.setDescription('The number of Marker Response PDUs transmitted on this Aggregation Port. This value is read-only.')
acDot3adAggPortDebugTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3), )
if mibBuilder.loadTexts: acDot3adAggPortDebugTable.setReference('IEEE 802.3 Subclause 30.7.4')
if mibBuilder.loadTexts: acDot3adAggPortDebugTable.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugTable.setDescription('A table that contains Link Aggregation debug information about every port that is associated with this device. A row appears in this table for each physical port.')
acDot3adAggPortDebugEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1), ).setIndexNames((0, "AC-LAG-MIB", "acDot3adAggPortNodeIdIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortSlotIndex"), (0, "AC-LAG-MIB", "acDot3adAggPortPortIndex"))
if mibBuilder.loadTexts: acDot3adAggPortDebugEntry.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugEntry.setDescription('A list of the debug parameters for a port.')
acDot3adAggPortDebugRxState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("currentRx", 1), ("expired", 2), ("defaulted", 3), ("initialize", 4), ("lacpDisabled", 5), ("portDisabled", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugRxState.setReference('IEEE 802.3 Subclause 30.7.4.1.2')
if mibBuilder.loadTexts: acDot3adAggPortDebugRxState.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugRxState.setDescription("This attribute holds the value `currentRx' if the Receive state machine for the Aggregation Port is in the CURRENT state, `expired' if the Receive state machine is in the EXPIRED state, `defaulted' if the Receive state machine is in the DEFAULTED state, `initialize' if the Receive state machine is in the INITIALIZE state, `lacpDisabled' if the Receive state machine is in the LACP_DISABLED state, or `portDisabled' if the Receive state machine is in the PORT_DISABLED state. This value is read-only.")
acDot3adAggPortDebugLastRxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 2), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugLastRxTime.setReference('IEEE 802.3 Subclause 30.7.4.1.3')
if mibBuilder.loadTexts: acDot3adAggPortDebugLastRxTime.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugLastRxTime.setDescription('The value of aTimeSinceSystemReset (F.2.1) when the last LACPDU was received by this Aggregation Port. This value is read-only.')
acDot3adAggPortDebugMuxState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("detached", 1), ("waiting", 2), ("attached", 3), ("collecting", 4), ("distributing", 5), ("collectingDistributing", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugMuxState.setReference('IEEE 802.3 Subclause 30.7.4.1.4')
if mibBuilder.loadTexts: acDot3adAggPortDebugMuxState.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugMuxState.setDescription("This attribute holds the value `detached' if the Mux state machine (43.4.14) for the Aggregation Port is in the DETACHED state, `waiting' if the Mux state machine is in the WAITING state, `attached' if the Mux state machine for the Aggregation Port is in the ATTACHED state, `collecting' if the Mux state machine for the Aggregation Port is in the COLLECTING state, `distributing' if the Mux state machine for the Aggregation Port is in the DISTRIBUTING state, and `collectingDistributing' if the Mux state machine for the Aggregation Port is in the COLLECTING_DISTRIBUTING state. This value is read-only.")
acDot3adAggPortDebugMuxReason = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugMuxReason.setReference('IEEE 802.3 Subclause 30.7.4.1.5')
if mibBuilder.loadTexts: acDot3adAggPortDebugMuxReason.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugMuxReason.setDescription('A human-readable text string indicating the reason for the most recent change of Mux machine state. This value is read-only.')
acDot3adAggPortDebugActorChurnState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 5), ChurnState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnState.setReference('IEEE 802.3 Subclause 30.7.4.1.6')
if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnState.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnState.setDescription("The state of the Actor Churn Detection machine (43.4.17) for the Aggregation Port. A value of `noChurn' indicates that the state machine is in either the NO_ACTOR_CHURN or the ACTOR_CHURN_MONITOR state, and `churn' indicates that the state machine is in the ACTOR_CHURN state. This value is read-only.")
acDot3adAggPortDebugPartnerChurnState = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 6), ChurnState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnState.setReference('IEEE 802.3 Subclause 30.7.4.1.7')
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnState.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnState.setDescription("The state of the Partner Churn Detection machine (43.4.17) for the Aggregation Port. A value of `noChurn' indicates that the state machine is in either the NO_PARTNER_CHURN or the PARTNER_CHURN_MONITOR state, and `churn' indicates that the state machine is in the PARTNER_CHURN state. This value is read-only.")
acDot3adAggPortDebugActorChurnCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnCount.setReference('IEEE 802.3 Subclause 30.7.4.1.8')
if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnCount.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugActorChurnCount.setDescription('Count of the number of times the Actor Churn state machine has entered the ACTOR_CHURN state. This value is read-only.')
acDot3adAggPortDebugPartnerChurnCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnCount.setReference('IEEE 802.3 Subclause 30.7.4.1.9')
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnCount.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChurnCount.setDescription('Count of the number of times the Partner Churn state machine has entered the PARTNER_CHURN state. This value is read-only.')
acDot3adAggPortDebugActorSyncTransitionCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugActorSyncTransitionCount.setReference('IEEE 802.3 Subclause 30.7.4.1.10')
if mibBuilder.loadTexts: acDot3adAggPortDebugActorSyncTransitionCount.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugActorSyncTransitionCount.setDescription("Count of the number of times the Actor's Mux state machine (43.4.15) has entered the IN_SYNC state. This value is read-only.")
acDot3adAggPortDebugPartnerSyncTransitionCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerSyncTransitionCount.setReference('IEEE 802.3 Subclause 30.7.4.1.11')
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerSyncTransitionCount.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerSyncTransitionCount.setDescription("Count of the number of times the Partner's Mux state machine (43.4.15) has entered the IN_SYNC state. This value is read-only.")
acDot3adAggPortDebugActorChangeCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugActorChangeCount.setReference('IEEE 802.3 Subclause 30.7.4.1.12')
if mibBuilder.loadTexts: acDot3adAggPortDebugActorChangeCount.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugActorChangeCount.setDescription("Count of the number of times the Actor's perception of the LAG ID for this Aggregation Port has changed. This value is read-only.")
acDot3adAggPortDebugPartnerChangeCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 2, 3, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChangeCount.setReference('IEEE 802.3 Subclause 30.7.4.1.13')
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChangeCount.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugPartnerChangeCount.setDescription("Count of the number of times the Partner's perception of the LAG ID (see 43.3.6.1) for this Aggregation Port has changed. This value is read-only.")
acDot3adAggConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2))
acDot3adAggGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1))
acDot3adAggCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 2))
acDot3adAggGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 1)).setObjects(("AC-LAG-MIB", "acDot3adAggActorSystemID"), ("AC-LAG-MIB", "acDot3adAggActorSystemPriority"), ("AC-LAG-MIB", "acDot3adAggAggregateOrIndividual"), ("AC-LAG-MIB", "acDot3adAggActorAdminKey"), ("AC-LAG-MIB", "acDot3adAggMACAddress"), ("AC-LAG-MIB", "acDot3adAggActorOperKey"), ("AC-LAG-MIB", "acDot3adAggPartnerSystemID"), ("AC-LAG-MIB", "acDot3adAggPartnerSystemPriority"), ("AC-LAG-MIB", "acDot3adAggPartnerOperKey"), ("AC-LAG-MIB", "acDot3adAggCollectorMaxDelay"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    acDot3adAggGroup = acDot3adAggGroup.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggGroup.setDescription('A collection of objects providing information about an aggregation.')
acDot3adAggPortListGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 2)).setObjects(("AC-LAG-MIB", "acDot3adAggPortListPorts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    acDot3adAggPortListGroup = acDot3adAggPortListGroup.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortListGroup.setDescription('A collection of objects providing information about every port in an aggregation.')
acDot3adAggPortGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 3)).setObjects(("AC-LAG-MIB", "acDot3adAggPortActorSystemPriority"), ("AC-LAG-MIB", "acDot3adAggPortActorSystemID"), ("AC-LAG-MIB", "acDot3adAggPortActorAdminKey"), ("AC-LAG-MIB", "acDot3adAggPortActorOperKey"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminSystemPriority"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperSystemPriority"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminSystemID"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperSystemID"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminKey"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperKey"), ("AC-LAG-MIB", "acDot3adAggPortSelectedAggID"), ("AC-LAG-MIB", "acDot3adAggPortAttachedAggID"), ("AC-LAG-MIB", "acDot3adAggPortActorPort"), ("AC-LAG-MIB", "acDot3adAggPortActorPortPriority"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminPort"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperPort"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminPortPriority"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperPortPriority"), ("AC-LAG-MIB", "acDot3adAggPortActorAdminState"), ("AC-LAG-MIB", "acDot3adAggPortActorOperState"), ("AC-LAG-MIB", "acDot3adAggPortPartnerAdminState"), ("AC-LAG-MIB", "acDot3adAggPortPartnerOperState"), ("AC-LAG-MIB", "acDot3adAggPortAggregateOrIndividual"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    acDot3adAggPortGroup = acDot3adAggPortGroup.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortGroup.setDescription('A collection of objects providing information about every port in an aggregation.')
acDot3adAggPortStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 4)).setObjects(("AC-LAG-MIB", "acDot3adAggPortStatsLACPDUsRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsMarkerPDUsRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsMarkerResponsePDUsRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsUnknownRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsIllegalRx"), ("AC-LAG-MIB", "acDot3adAggPortStatsLACPDUsTx"), ("AC-LAG-MIB", "acDot3adAggPortStatsMarkerPDUsTx"), ("AC-LAG-MIB", "acDot3adAggPortStatsMarkerResponsePDUsTx"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    acDot3adAggPortStatsGroup = acDot3adAggPortStatsGroup.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortStatsGroup.setDescription('A collection of objects providing information about every port in an aggregation.')
acDot3adAggPortDebugGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 5)).setObjects(("AC-LAG-MIB", "acDot3adAggPortDebugRxState"), ("AC-LAG-MIB", "acDot3adAggPortDebugLastRxTime"), ("AC-LAG-MIB", "acDot3adAggPortDebugMuxState"), ("AC-LAG-MIB", "acDot3adAggPortDebugMuxReason"), ("AC-LAG-MIB", "acDot3adAggPortDebugActorChurnState"), ("AC-LAG-MIB", "acDot3adAggPortDebugPartnerChurnState"), ("AC-LAG-MIB", "acDot3adAggPortDebugActorChurnCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugPartnerChurnCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugActorSyncTransitionCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugPartnerSyncTransitionCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugActorChangeCount"), ("AC-LAG-MIB", "acDot3adAggPortDebugPartnerChangeCount"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    acDot3adAggPortDebugGroup = acDot3adAggPortDebugGroup.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggPortDebugGroup.setDescription('A collection of objects providing debug information about every aggregated port.')
acDot3adTablesLastChangedGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 1, 1, 6)).setObjects(("AC-LAG-MIB", "acDot3adTablesLastChanged"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    acDot3adTablesLastChangedGroup = acDot3adTablesLastChangedGroup.setStatus('current')
if mibBuilder.loadTexts: acDot3adTablesLastChangedGroup.setDescription('A collection of objects providing information about the time of changes to the configuration of aggregations and their ports.')
acDot3adAggCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 2, 2, 1)).setObjects(("AC-LAG-MIB", "acDot3adAggGroup"), ("AC-LAG-MIB", "acDot3adAggPortGroup"), ("AC-LAG-MIB", "acDot3adTablesLastChangedGroup"), ("AC-LAG-MIB", "acDot3adAggPortListGroup"), ("AC-LAG-MIB", "acDot3adAggPortStatsGroup"), ("AC-LAG-MIB", "acDot3adAggPortDebugGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    acDot3adAggCompliance = acDot3adAggCompliance.setStatus('current')
if mibBuilder.loadTexts: acDot3adAggCompliance.setDescription('The compliance statement for device support of Link Aggregation.')
mibBuilder.exportSymbols("AC-LAG-MIB", acDot3adAggPortAggregateOrIndividual=acDot3adAggPortAggregateOrIndividual, acDot3adAggNodeIdIndex=acDot3adAggNodeIdIndex, acDot3adAggPortStatsMarkerPDUsTx=acDot3adAggPortStatsMarkerPDUsTx, acDot3adAggPortActorSystemID=acDot3adAggPortActorSystemID, acDot3adAggPortDebugMuxReason=acDot3adAggPortDebugMuxReason, acDot3adAggPortTable=acDot3adAggPortTable, acDot3adAggPartnerSystemPriority=acDot3adAggPartnerSystemPriority, acDot3adAggPortActorPort=acDot3adAggPortActorPort, acDot3adAggPortActorAdminKey=acDot3adAggPortActorAdminKey, acDot3adAggTable=acDot3adAggTable, PYSNMP_MODULE_ID=acLagMIB, acDot3adAggPortDebugActorChangeCount=acDot3adAggPortDebugActorChangeCount, acDot3adAggPortDebugGroup=acDot3adAggPortDebugGroup, acDot3adAggPortDebugMuxState=acDot3adAggPortDebugMuxState, acDot3adAggPortDebugActorChurnState=acDot3adAggPortDebugActorChurnState, acDot3adAggGroup=acDot3adAggGroup, acDot3adAggPortActorOperKey=acDot3adAggPortActorOperKey, acDot3adAggPortPortIndex=acDot3adAggPortPortIndex, acDot3adAggInstanceIndex=acDot3adAggInstanceIndex, acDot3adAggPortStatsTable=acDot3adAggPortStatsTable, acDot3adAggPortStatsMarkerResponsePDUsTx=acDot3adAggPortStatsMarkerResponsePDUsTx, acDot3adAggActorSystemID=acDot3adAggActorSystemID, acDot3adAggPortActorPortPriority=acDot3adAggPortActorPortPriority, acDot3adAggPortDebugLastRxTime=acDot3adAggPortDebugLastRxTime, acDot3adAggPortDebugActorSyncTransitionCount=acDot3adAggPortDebugActorSyncTransitionCount, acDot3adAggCompliances=acDot3adAggCompliances, acDot3adAggActorSystemPriority=acDot3adAggActorSystemPriority, acDot3adAggCompliance=acDot3adAggCompliance, acDot3adAggPortActorOperState=acDot3adAggPortActorOperState, ChurnState=ChurnState, AcAggInstanceIndex=AcAggInstanceIndex, acDot3adAggPortPartnerAdminPort=acDot3adAggPortPartnerAdminPort, acDot3adAggPortPartnerOperState=acDot3adAggPortPartnerOperState, acDot3adAggPortSlotIndex=acDot3adAggPortSlotIndex, acDot3adAggPortPartnerAdminKey=acDot3adAggPortPartnerAdminKey, acLagMIB=acLagMIB, lagMIBObjects=lagMIBObjects, acDot3adAggPort=acDot3adAggPort, acDot3adAggPortPartnerAdminSystemID=acDot3adAggPortPartnerAdminSystemID, LacpState=LacpState, acDot3adAggPortPartnerOperPort=acDot3adAggPortPartnerOperPort, acDot3adAggConformance=acDot3adAggConformance, acDot3adAggPortStatsEntry=acDot3adAggPortStatsEntry, acDot3adAggPortDebugPartnerChurnCount=acDot3adAggPortDebugPartnerChurnCount, acDot3adAggPortDebugPartnerChangeCount=acDot3adAggPortDebugPartnerChangeCount, acDot3adAggPortPartnerOperKey=acDot3adAggPortPartnerOperKey, acDot3adAggAggregateOrIndividual=acDot3adAggAggregateOrIndividual, acDot3adAggActorOperKey=acDot3adAggActorOperKey, acDot3adAggPartnerOperKey=acDot3adAggPartnerOperKey, AcAggInstanceValue=AcAggInstanceValue, acDot3adAggPortDebugPartnerSyncTransitionCount=acDot3adAggPortDebugPartnerSyncTransitionCount, acDot3adAggPortAttachedAggID=acDot3adAggPortAttachedAggID, acDot3adAggEntry=acDot3adAggEntry, acDot3adAggPortStatsLACPDUsTx=acDot3adAggPortStatsLACPDUsTx, acDot3adAggPortDebugActorChurnCount=acDot3adAggPortDebugActorChurnCount, acDot3adAggPortNodeIdIndex=acDot3adAggPortNodeIdIndex, acDot3adAggPortListGroup=acDot3adAggPortListGroup, acDot3adAggPortPartnerOperPortPriority=acDot3adAggPortPartnerOperPortPriority, acDot3adAggPortPartnerAdminPortPriority=acDot3adAggPortPartnerAdminPortPriority, acDot3adAggPortStatsGroup=acDot3adAggPortStatsGroup, acDot3adAggPortGroup=acDot3adAggPortGroup, acDot3adAggPortPartnerOperSystemID=acDot3adAggPortPartnerOperSystemID, acDot3adAggPortListEntry=acDot3adAggPortListEntry, acDot3adAggPortPartnerAdminState=acDot3adAggPortPartnerAdminState, acDot3adAggPortPartnerOperSystemPriority=acDot3adAggPortPartnerOperSystemPriority, acDot3adAggPortEntry=acDot3adAggPortEntry, acDot3adAggPortDebugEntry=acDot3adAggPortDebugEntry, acDot3adTablesLastChanged=acDot3adTablesLastChanged, acDot3adAggGroups=acDot3adAggGroups, acDot3adAggPartnerSystemID=acDot3adAggPartnerSystemID, PortList=PortList, acDot3adAggCollectorMaxDelay=acDot3adAggCollectorMaxDelay, acDot3adTablesLastChangedGroup=acDot3adTablesLastChangedGroup, acDot3adAggPortStatsUnknownRx=acDot3adAggPortStatsUnknownRx, acDot3adAggPortSelectedAggID=acDot3adAggPortSelectedAggID, LacpKey=LacpKey, acDot3adAggPortActorSystemPriority=acDot3adAggPortActorSystemPriority, acDot3adAggPortStatsMarkerPDUsRx=acDot3adAggPortStatsMarkerPDUsRx, acDot3adAggPortStatsIllegalRx=acDot3adAggPortStatsIllegalRx, acDot3adAggMACAddress=acDot3adAggMACAddress, acDot3adAggPortActorAdminState=acDot3adAggPortActorAdminState, acDot3adAggPortListPorts=acDot3adAggPortListPorts, acDot3adAggPortDebugTable=acDot3adAggPortDebugTable, acDot3adAggPortDebugRxState=acDot3adAggPortDebugRxState, acDot3adAgg=acDot3adAgg, acDot3adAggActorAdminKey=acDot3adAggActorAdminKey, acDot3adAggPortListTable=acDot3adAggPortListTable, acDot3adAggPortDebugPartnerChurnState=acDot3adAggPortDebugPartnerChurnState, acDot3adAggPortPartnerAdminSystemPriority=acDot3adAggPortPartnerAdminSystemPriority, acDot3adAggPortStatsLACPDUsRx=acDot3adAggPortStatsLACPDUsRx, acDot3adAggPortStatsMarkerResponsePDUsRx=acDot3adAggPortStatsMarkerResponsePDUsRx)
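
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated module).
# A pysnmp-compiled MIB like the one above is normally loaded through a
# MibBuilder. The snippet is kept commented out because this file is itself
# the module being loaded; the source directory '.' is an assumption - point
# it at wherever this file is saved as AC-LAG-MIB.py.
#
# from pysnmp.smi import builder, view
#
# mib_builder = builder.MibBuilder()
# mib_builder.addMibSources(builder.DirMibSource('.'))
# mib_builder.loadModules('AC-LAG-MIB')
#
# # Resolve a symbol back out of the loaded module...
# (tables_last_changed,) = mib_builder.importSymbols('AC-LAG-MIB', 'acDot3adTablesLastChanged')
# print(tables_last_changed.getName())  # (1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 3)
#
# # ...or translate a numeric OID into its human-readable label.
# mib_view = view.MibViewController(mib_builder)
# oid, label, suffix = mib_view.getNodeName((1, 3, 6, 1, 4, 1, 2785, 2, 3, 8, 1, 3))
# ---------------------------------------------------------------------------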
import torch
import torch.optim as optim

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

import sys
sys.path.append("PATH")

from Models.MATCH.MATCH import MATCH
from Models.MATCH.functions import (get_tensors, augment, format_output, CE_loss)
from Models.metrics import (AUC, Brier)
from Simulation.data_simulation_base import simulate_JM_base
from Simulation.data_simulation_nonPH import simulate_JM_nonPH

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
pd.options.mode.chained_assignment = None

import pickle
import time
start = time.time()

n_sim = 2
I = 1000
obstime = [0,1,2,3,4,5,6,7,8,9,10]
landmark_times = [1,2,3,4,5]
pred_windows = [1,2,3]

AUC_array = np.zeros((n_sim, len(landmark_times), len(pred_windows)))
iAUC_array = np.zeros((n_sim, len(landmark_times)))
true_AUC_array = np.zeros((n_sim, len(landmark_times), len(pred_windows)))
true_iAUC_array = np.zeros((n_sim, len(landmark_times)))

BS_array = np.zeros((n_sim, len(landmark_times), len(pred_windows)))
iBS_array = np.zeros((n_sim, len(landmark_times)))
true_BS_array = np.zeros((n_sim, len(landmark_times), len(pred_windows)))
true_iBS_array = np.zeros((n_sim, len(landmark_times)))

for i_sim in range(n_sim):
    if i_sim % 10 == 0:
        print("i_sim:", i_sim)
    np.random.seed(i_sim)
    data_all = simulate_JM_base(I=I, obstime=obstime, opt="none", seed=i_sim)
    data = data_all[data_all.obstime < data_all.time]

    ## split train/test
    random_id = range(I)  # np.random.permutation(range(I))
    train_id = random_id[0:int(0.7*I)]
    test_id = random_id[int(0.7*I):I]

    train_data = data[data["id"].isin(train_id)]
    test_data = data[data["id"].isin(test_id)]

    ## Scale data using Min-Max Scaler
    minmax_scaler = MinMaxScaler(feature_range=(-1,1))
    train_data.loc[:,["X1","X2","Y1","Y2","Y3"]] = minmax_scaler.fit_transform(train_data.loc[:,["X1","X2","Y1","Y2","Y3"]])
    test_data.loc[:,["X1","X2","Y1","Y2","Y3"]] = minmax_scaler.transform(test_data.loc[:,["X1","X2","Y1","Y2","Y3"]])

    train_long, train_base, train_mask, e_train, t_train, train_obs_time = get_tensors(train_data.copy())  # for BS

    ## Train model
    torch.manual_seed(0)

    out_len = 4
    model = MATCH(3, 2, out_len)
    model = model.train()
    optimizer = optim.Adam(model.parameters())

    n_epoch = 25
    batch_size = 32

    test_long, test_base, test_mask, e_test, t_test, test_obs_time = get_tensors(test_data.copy())
    test_long, test_base, test_mask, e_test, t_test, subjid_test = augment(
        test_long, test_base, test_mask, e_test, t_test)

    loss_values = []
    loss_test = []
    for epoch in range(n_epoch):
        running_loss = 0
        train_id = np.random.permutation(train_id)
        for batch in range(0, len(train_id), batch_size):
            optimizer.zero_grad()

            indices = train_id[batch:batch+batch_size]
            batch_data = train_data[train_data["id"].isin(indices)]

            batch_long, batch_base, batch_mask, batch_e, batch_t, obs_time = get_tensors(batch_data.copy())
            batch_long, batch_base, batch_mask, batch_e, batch_t, subjid = augment(
                batch_long, batch_base, batch_mask, batch_e, batch_t)

            if len(indices) > 1:  # drop if last batch size is 1
                yhat_surv = torch.softmax(model(batch_long, batch_base, batch_mask), dim=1)
                s_filter, e_filter = format_output(obs_time, batch_mask, batch_t, batch_e, out_len)
                loss = CE_loss(yhat_surv, s_filter, e_filter)
                loss.backward()
                optimizer.step()
                running_loss += loss
        yhat_surv_test = torch.softmax(model(test_long, test_base, test_mask), dim=1)
        s_filter_t, e_filter_t = format_output(test_obs_time, test_mask, t_test, e_test, out_len)
        loss_t = CE_loss(yhat_surv_test, s_filter_t, e_filter_t)
        loss_test.append(loss_t.tolist())
        loss_values.append(running_loss.tolist())
    plt.plot((loss_values-np.min(loss_values))/(np.max(loss_values)-np.min(loss_values)), 'b-')
    plt.plot((loss_test-np.min(loss_test))/(np.max(loss_test)-np.min(loss_test)), 'g-')

    for LT_index, LT in enumerate(landmark_times):
        pred_times = [x+LT for x in pred_windows]

        # Only keep subjects with survival time > landmark time
        tmp_data = test_data.loc[test_data["time"]>LT,:]
        tmp_id = np.unique(tmp_data["id"].values)
        tmp_all = data_all.loc[data_all["id"].isin(tmp_id),:]

        # Only keep longitudinal observations <= landmark time
        tmp_data = tmp_data.loc[tmp_data["obstime"]<=LT,:]

        true_prob_tmp = tmp_all.loc[tmp_all["predtime"].isin(pred_times), ["true"]].values.reshape(-1,len(pred_times))
        true_prob_LT = tmp_all.loc[tmp_all["predtime"]==LT, ["true"]].values
        true_prob_tmp = true_prob_tmp / true_prob_LT

        tmp_long, tmp_base, tmp_mask, e_tmp, t_tmp, obs_time = get_tensors(tmp_data.copy())

        model = model.eval()
        surv_pred = torch.softmax(model(tmp_long, tmp_base, tmp_mask), dim=1)
        surv_pred = surv_pred.detach().numpy()
        surv_pred = surv_pred[:,::-1].cumsum(axis=1)[:,::-1]
        surv_pred = surv_pred[:,1:(out_len+1)]

        auc, iauc = AUC(surv_pred, e_tmp.numpy(), t_tmp.numpy(), np.array(pred_times))
        AUC_array[i_sim, LT_index, :] = auc
        iAUC_array[i_sim, LT_index] = iauc

        auc, iauc = AUC(true_prob_tmp, np.array(e_tmp), np.array(t_tmp), np.array(pred_times))
        true_AUC_array[i_sim, LT_index, :] = auc
        true_iAUC_array[i_sim, LT_index] = iauc

        bs, ibs = Brier(surv_pred, e_tmp.numpy(), t_tmp.numpy(), e_train.numpy(), t_train.numpy(), LT, np.array(pred_windows))
        BS_array[i_sim, LT_index, :] = bs
        iBS_array[i_sim, LT_index] = ibs

        bs, ibs = Brier(true_prob_tmp, e_tmp.numpy(), t_tmp.numpy(), e_train.numpy(), t_train.numpy(), LT, np.array(pred_windows))
        true_BS_array[i_sim, LT_index, :] = bs
        true_iBS_array[i_sim, LT_index] = ibs

np.set_printoptions(precision=3)
print("AUC:", np.nanmean(AUC_array, axis=0))
print("iAUC:", np.mean(iAUC_array, axis=0))
print("True AUC:", np.nanmean(true_AUC_array, axis=0))
print("True iAUC:", np.mean(true_iAUC_array, axis=0))

print("BS:\n", np.mean(BS_array, axis=0))
print("iBS:", np.mean(iBS_array, axis=0))
print("True BS:\n", np.mean(true_BS_array, axis=0))
print("True iBS:", np.mean(true_iBS_array, axis=0))

end = time.time()
print("total time:", (end-start)/60)

'''
## save results
results = {"AUC":AUC_array, "iAUC":iAUC_array,
           "True_AUC":true_AUC_array, "True_iAUC":true_iAUC_array,
           "BS":BS_array, "iBS":iBS_array,
           "True_BS":true_BS_array, "True_iBS":true_iBS_array}

outfile = open('MATCH_results.pickle', 'wb')
pickle.dump(results, outfile)
outfile.close()
'''

'''
## read results
infile = open('MATCH_results.pickle', 'rb')
results = pickle.load(infile)
infile.close()
'''
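
'''
## toy check of the survival-curve step above (added for illustration):
## the softmax outputs per-interval event probabilities P(event in interval k);
## the reversed cumulative sum turns them into survival probabilities
## P(T > start of interval k), which is what AUC/Brier expect.
p = np.array([[0.1, 0.2, 0.3, 0.4]])       # rows sum to 1
surv = p[:, ::-1].cumsum(axis=1)[:, ::-1]  # reversed cumsum
print(surv)                                # [[1.  0.9 0.7 0.4]]
'''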
# coding=utf-8
# NOTE: Python 2 script (str.decode() is used on byte-string literals below).
import ctypes
import json
import time

import jsonpath
import requests
import progressbar
import requests.packages.urllib3

headers1 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 5.1; rv:24.0) Gecko/20100101 Firefox/24.0'
}
headers2 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.69 '
                  'Safari/537.36 TheWorld 6 '
}
url1 = "https://api.github.com/repos/way-zer/ScriptAgent4MindustryExt/releases/latest"
url2 = "https://api.github.com/repos/Anuken/Mindustry/releases/latest"

assets1 = requests.get(url1, headers=headers1).json()['assets']
tag1 = requests.get(url1, headers=headers1).json()['tag_name']
name1 = jsonpath.jsonpath(assets1, "$..name")
# indexing a list with a list raises TypeError; use the first matching index
zn = [i for i, x in enumerate(name1) if x.rfind('zip') != -1]
zipname = name1[zn[0]]
jn = [i for i, x in enumerate(name1) if x.rfind('jar') != -1]
jarname = name1[jn[0]]
down1 = jsonpath.jsonpath(assets1, "$..browser_download_url")
zd = [i for i, x in enumerate(down1) if x.rfind('zip') != -1]
zipdown = down1[zd[0]]
jd = [i for i, x in enumerate(down1) if x.rfind('jar') != -1]
jardown = down1[jd[0]]
updata1 = requests.get(url1, headers=headers1).json()['body']
ctypes.WinDLL("user32.dll").MessageBoxW(0, updata1, "插件更新提醒".decode("utf8"), 0)  # "plugin update reminder"
time.sleep(3)

tag2 = requests.get(url2, headers=headers1).json()['tag_name']
assets2 = requests.get(url2, headers=headers2).json()['assets']
name2 = jsonpath.jsonpath(assets2, "$..name")
md = [i for i, x in enumerate(name2) if x.find('M') != -1]
mdtname = name2[md[0]]
sd = [i for i, x in enumerate(name2) if x.find('server') != -1]
sername = name2[sd[0]]
down2 = jsonpath.jsonpath(assets2, "$..browser_download_url")
md = [i for i, x in enumerate(down2) if x.find('M') != -1]
mdtdown = down2[md[0]]
sd = [i for i, x in enumerate(down2) if x.find('server') != -1]
serdown = down2[sd[0]]
updata2 = requests.get(url2, headers=headers2).json()['body']
ctypes.WinDLL("user32.dll").MessageBoxW(0, updata2, "核心更新提醒".decode("utf8"), 0)  # "core update reminder"


def DownLoad(save, url):
    response = requests.request("GET", url, stream=True, data=None, headers=None)
    requests.packages.urllib3.disable_warnings()
    save_path = save
    total_length = int(response.headers.get("Content-Length"))
    with open(save_path, 'wb') as f:
        widgets = ['Progress: ', progressbar.Percentage(), ' ',
                   progressbar.Bar(marker='#', left='[', right=']'),
                   ' ', progressbar.ETA(), ' ', progressbar.FileTransferSpeed()]
        pbar = progressbar.ProgressBar(widgets=widgets, maxval=total_length).start()
        downloaded = 0
        for chunk in response.iter_content(chunk_size=1):
            if chunk:
                f.write(chunk)
                f.flush()
                # pbar.update() takes the absolute progress, not an increment
                downloaded += len(chunk)
                pbar.update(downloaded)
        pbar.finish()
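
# Usage sketch (added for illustration; the original script defines DownLoad
# but never calls it). Kept commented out so importing the file does not start
# downloads; the save paths are just the asset file names resolved above.
# DownLoad(zipname, zipdown)   # plugin zip from ScriptAgent4MindustryExt
# DownLoad(sername, serdown)   # Mindustry dedicated server jar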
nilq/baby-python
python
# -*- coding: utf-8 -*- ''' Provides tools to help unit test projects using pop. For now, provides mock Hub instances. ''' # Import python libs import inspect import copy from asyncio import iscoroutinefunction from functools import partial # Import third party libs try: from asynctest.mock import create_autospec except ImportError: from unittest.mock import create_autospec as mock_create_autospec def create_autospec(spec, *args, **kwargs): if iscoroutinefunction(spec): raise Exception('MockHub requires asynctest in order to mock async functions') return mock_create_autospec(spec, *args, **kwargs) # Import pop libs from pop.contract import Contracted from pop.loader import LoadedMod from pop.hub import Hub, Sub class _LookUpTable: def __init__(self, *args, **kwargs): self._lut = {} super().__init__(*args, **kwargs) def contains(self, key): return self.is_hashable(key) and key in self._lut def update(self, key, value): if self.is_hashable(key): self._lut[key] = value def lookup(self, key): return self._lut[key] def is_hashable(self, key): try: _ = {key: None} return True except TypeError: return False def __len__(self): return len(self._lut) class _LazyPop: __lazy_classes = [Hub, Sub, LoadedMod] class __Lazy: pass def __init__(self, obj, lut=None): if isinstance(obj, Hub): lut = _LookUpTable() lut.update('hub', self) lut.update(obj, self) elif isinstance(obj, Sub): obj._load_all() self.__lut = lut self.__obj = obj for attr_name in self.__attr_names(): setattr(self, attr_name, _LazyPop.__Lazy) def __attr_names(self): # TODO: '_' - is this actually right? what should I really expose? attrs = [attr for attr in self.__obj.__dict__ if not attr.startswith('_')] if isinstance(self.__obj, Hub): attrs += list(self.__obj._subs) elif isinstance(self.__obj, Sub): attrs += list(self.__obj._loaded) attrs += list(self.__obj._subs) elif isinstance(self.__obj, LoadedMod): attrs += list(self.__obj._attrs) else: raise Exception('Standard objects should not be lazy: {}'.format(str(self.__obj))) return attrs def __getattribute__(self, item): if not item.strip('_'): raise NotImplementedError if '.' in item: result = self for part in item.split('.').copy(): result = getattr(result, part) return result attr = super().__getattribute__(item) if attr is _LazyPop.__Lazy: orig = getattr(self.__obj, item) if self.__lut.contains(orig): attr = self.__lut.lookup(orig) elif [True for cls in self.__lazy_classes if isinstance(orig, cls)]: attr = self.__class__(orig, self.__lut) elif isinstance(orig, Contracted): attr = self._mock_function(orig) else: attr = self._mock_attr(orig) self.__lut.update(orig, attr) setattr(self, item, attr) return attr def _mock_attr(self, a): return create_autospec(a, spec_set=True) def _mock_function(self, f): raise NotImplementedError() def strip_hub(f): ''' returns a no-op function with the same function signature... minus the first parameter (hub). 
''' if inspect.iscoroutinefunction(f): newf = 'async ' else: newf = '' newf += 'def {}('.format(f.__name__) params = inspect.signature(f).parameters new_params = [] for param in params: if params[param].kind is inspect.Parameter.VAR_POSITIONAL: new_params.append('*{}'.format(param)) elif params[param].kind is inspect.Parameter.VAR_KEYWORD: new_params.append('**{}'.format(param)) else: new_params.append(param) if params[param].default is not inspect.Parameter.empty: new_params[-1] += '="has default"' newf += ', '.join(new_params[1:]) # skip hub newf += '): pass' scope = {} exec(newf, scope) return scope[f.__name__] class MockHub(_LazyPop): ''' Provides mocks mirroring a real hub:: hub.sub.mod.fn() # mock hub.sub.mod.attr # mock ''' def _mock_function(self, f): return create_autospec(strip_hub(f.func), spec_set=True) class NoContractHub(_LazyPop): ''' Provides access to real functions, bypassing contracts and mocking attributes:: hub.sub.mod.fn() # executes real function, no contracts hub.sub.mod.attr # mock ''' def _mock_function(self, f): return partial(f.func, self._LazyPop__lut.lookup('hub')) def mock_contracted(c): mock_func = create_autospec(c.func, spec_set=True) mock_func.__module__ = c.func.__module__ mock_func.__dict__.update(copy.deepcopy(c.func.__dict__)) return Contracted(c.hub, c.contracts, mock_func, c.ref, c.name) class ContractHub(_LazyPop): ''' Runs a call through the contract system, but the function is a mock. Mostly useful for integration tests: hub.sub.mod.fn() # executes mock function, real contracts hub.sub.mod.attr # mock You can verify what parameters are passed to a function after going through loaded contracts:: contract_hub.sub.mod.fn('foo') assert contract_hub.sub.mod.fn.called_with('bar') -------------------------------- You can view or modify the contracts that will be executed on one function for a test - but first: MODIFYING CONTRACTS THIS WAY IS NOT SAFE ON REAL HUBS AND OTHER TESTING HUB VARIANTS! I have previously thought of modifying contracts with mocks, only to realize what I really want is to unit test a specific contract. Think twice before using this functionality. -------------------------------- The contract modules are visible via hub.sub.mod.fn.contracts, and the contract functions that will be called, wrapping fn are visible via hub.sub.mod.fn.contract_functions. It is safe to modify the contracts list or contract_functions dict only on a ContractHub. Examine that the first contract function to be called is 'foo.pre_fn', then bypass it:: assert contract_hub.sub.mod.fn.contract_functions['pre'][0].__module__ is 'foo' assert contract_hub.sub.mod.fn.contract_functions['pre'][0].__name__ is 'pre_fn' hub.sub.mod.fn.contract_functions['pre'][0] = create_autospec(hub.sub.mod.fn.contract_functions['pre'][0]) Assert that one contract will be called before another:: assert contract_hub.sub.mod.fn.contracts.index(contract1) < contract_hub.sub.mod.fn.contracts.index(contract2) ''' def _mock_function(self, f): return mock_contracted(f)
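# --- Hedged usage sketch (appended; not part of the original module) ---
# strip_hub() is self-contained, so it can be demonstrated without a real
# pop hub. The function name `fake_plugin` below is hypothetical.
if __name__ == '__main__':
    def fake_plugin(hub, name, *args, retries=3, **kwargs):
        pass

    stripped = strip_hub(fake_plugin)
    # Expected: (name, *args, retries='has default', **kwargs)
    print(inspect.signature(stripped))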
nilq/baby-python
python
import pwn


def gnu_hash(s):
    h = 5381
    for c in s:
        h = h * 33 + ord(c)
    return h & 0xffffffff


class DynELF:
    def __init__(self, path, leak, base=None):
        if isinstance(path, pwn.ELF):
            self.elf = path
        else:
            self.elf = pwn.elf.load(path)
        self.leak = leak
        self.base = base

    def lookup(self, symb, lib='libc'):
        if self.elf.elfclass == 'ELF32':
            return self._lookup32(symb, lib)
        if self.elf.elfclass == 'ELF64':
            return self._lookup64(symb, lib)

    def _lookup32(self, symb, lib):
        pwn.log.bug('Unimplemented')

    def _lookup64(self, symb, lib):
        base = self.base
        leak = self.leak
        gotoff = self.elf.sections['.got.plt']['addr']
        if base is None:
            pass  # XXX: Read base address
        # else:
        #     pwn.log.die('Position independent ELF needs a base address')
        else:
            gotplt = base + gotoff

        pwn.log.waitfor('Resolving "%s"' % symb)

        def status(s):
            pwn.log.status('Leaking %s' % s)

        status('link_map')
        link_map = leak.q(gotplt, 1)

        status('%s load address' % lib)
        cur = link_map
        while True:
            addr = leak.q(cur + 8)
            name = leak.s(addr)
            if lib in name:
                break
            cur = leak.q(cur + 24)
        libbase = leak.q(cur)

        status('program headers offset')
        e_phoff = leak.q(libbase + 32)
        e_ph = libbase + e_phoff

        status('.dynamic section offset')
        cur = e_ph
        while True:
            typ = leak.d(cur)
            if typ == 2:
                break
            cur += 7 * 8  # sizeof(Elf64_Phdr) == 56

        dynoff = leak.q(cur + 16)
        dyn = libbase + dynoff

        status('.gnu.hash, .strtab and .symtab offsets')
        cur = dyn
        gnuhsh = None
        strtab = None
        symtab = None
        while None in [gnuhsh, strtab, symtab]:
            tag = leak.q(cur)
            if tag == 5:
                strtab = leak.q(cur, 1)
            elif tag == 6:
                symtab = leak.q(cur, 1)
            elif tag == 0x6ffffef5:
                gnuhsh = leak.q(cur, 1)
            cur += 16

        status('.gnu.hash parms')
        nbuckets = leak.d(gnuhsh)
        symndx = leak.d(gnuhsh, 1)
        maskwords = leak.d(gnuhsh, 2)
        shift2 = leak.d(gnuhsh, 3)

        buckets = gnuhsh + 16 + 8 * maskwords
        chains = buckets + 4 * nbuckets

        status('hash chain index')
        hsh = gnu_hash(symb)
        bucket = hsh % nbuckets
        ndx = leak.d(buckets, bucket)
        chain = chains + 4 * (ndx - symndx)
        if ndx == 0:
            pwn.log.failed('Empty chain')
            return None
        status('hash chain')
        i = 0
        while True:
            # Chain entry i corresponds to symbol index ndx + i; the original
            # re-read the same word forever, so advance through the chain here
            hsh2 = leak.d(chain, i)
            if (hsh | 1) == (hsh2 | 1):
                break
            if hsh2 & 1:
                pwn.log.failed('No hash')
                return None
            i += 1
        sym = symtab + 24 * (ndx + i)
        status('symbol offset')
        offset = leak.q(sym, 1)
        pwn.log.succeeded()
        return offset + libbase
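# --- Hedged sanity check (appended; not part of the original module) ---
# gnu_hash() implements the GNU symbol hash; 'printf' hashes to 0x156b2bb8.
# Running this as a script still requires the `pwn` import above to resolve.
if __name__ == '__main__':
    assert gnu_hash('printf') == 0x156b2bb8
    print(hex(gnu_hash('printf')))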
nilq/baby-python
python
import os
# kepaknaga@gmail.com


def cuci():
    os.system('clear')


cuci()
while True:
    print('====================')
    print('=====GITKU v2.0=====')
    print(' 0 = git pull')
    print(' 1 = git add .')
    print(' 2 = git commit -m')
    print(' 3 = git push')
    print(' 4 = git add & commit')
    print(' 5 = git diff')
    print(' 6 = git status')
    print(' 7 = git checkout .')
    print('19 = exit')
    jwp = input('Ans: ')
    if jwp == "1":
        cuci()
        os.system('git add .')
        print('Done add')
    elif jwp == "2":
        note = input("comment : ")
        cuci()
        os.system("git commit -m '" + note + "'")
    elif jwp == "3":
        note = input("branch : ")
        cuci()
        os.system('git push -u origin ' + note)
    elif jwp == "4":
        cuci()
        os.system('git add .')
        print('Done add')
        note = input("comment : ")
        os.system("git commit -m '" + note + "'")
    elif jwp == "5":
        cuci()
        os.system('git diff')
    elif jwp == "6":
        cuci()
        os.system('git status')
    elif jwp == "7":
        note = input("sure (y/n) : ")
        # `is` compares identity, not string equality; use == here
        if note == 'y':
            cuci()
            os.system('git checkout .')
        else:
            cuci()
    elif jwp == "0":
        cuci()
        os.system('git pull')
    elif jwp == "19":
        cuci()
        break
    else:
        os.system('git ' + jwp)
nilq/baby-python
python
from typing import Tuple, List import numpy as np from GPy.core.parameterization.priors import Prior, Gaussian from numpy.linalg import LinAlgError from statsmodels.stats.correlation_tools import cov_nearest from src.autoks.backend.kernel import get_priors from src.autoks.core.active_set import ActiveSet from src.autoks.core.covariance import Covariance from src.autoks.distance import util # Adapted from Malkomes et al., 2016 # Bayesian optimization for automated model selection (BOMS) # c.f. https://github.com/gustavomalkomes/automated_model_selection # For now this represents the active set class ActiveModels = ActiveSet class DistanceBuilder: """DistanceBuilder Build distance matrix between models.""" hyperparameter_data_noise_samples: np.ndarray _average_distance: np.ndarray def __init__(self, noise_prior: Prior, num_samples: int, max_num_hyperparameters: int, max_num_kernels: int, active_models: ActiveModels, initial_model_indices: List[int], data_X: np.ndarray, sampling_method: str = 'generalized_halton'): self.num_samples = num_samples self.max_num_hyperparameters = max_num_hyperparameters self.max_num_kernels = max_num_kernels self._sampling_method = sampling_method self.probability_samples = util.probability_samples(max_num_hyperparameters=self.max_num_hyperparameters, num_samples=self.num_samples, sampling_method=self._sampling_method) assert noise_prior.__class__ == Gaussian noise_prior = np.array([noise_prior]) noise_samples = util.prior_sample(noise_prior, self.probability_samples) self.hyperparameter_data_noise_samples = np.exp(noise_samples) self._average_distance = np.full((self.max_num_kernels, self.max_num_kernels), np.nan) np.fill_diagonal(self._average_distance, 0) self.precompute_information(active_models, initial_model_indices, data_X) def precompute_information(self, active_models: ActiveModels, new_candidates_indices: List[int], data_X: np.ndarray) -> None: """Precompute distance information for each new candidate. :param active_models: :param new_candidates_indices: :param data_X: :return: """ for i in new_candidates_indices: covariance = active_models.models[i].covariance precomputed_info = self.create_precomputed_info(covariance, data_X) active_models.models[i].info = precomputed_info def update(self, active_models: ActiveModels, new_candidates_indices: List[int], all_candidates_indices: List[int], selected_indices: List[int], data_X: np.ndarray) -> None: """Update average distance between models. :param active_models: :param new_candidates_indices: :param all_candidates_indices: :param selected_indices: :param data_X: :return: """ # First step is to precompute information for the new candidate models self.precompute_information(active_models, new_candidates_indices, data_X) # Second step is to compute the distance between the trained models vs candidate models. new_evaluated_models = selected_indices[-1] all_old_candidates_indices = np.setdiff1d(all_candidates_indices, new_candidates_indices) # i) new evaluated models vs all old candidates. 
self.compute_distance(active_models, [new_evaluated_models], list(all_old_candidates_indices.tolist())) # ii) new candidate models vs all trained models self.compute_distance(active_models, selected_indices, new_candidates_indices) def get_kernel(self, index: int) -> np.ndarray: """ :param index: :return: """ return self._average_distance[:index, :index] @staticmethod def metric(data_i, data_j, **kwargs) -> float: raise NotImplementedError def compute_distance(self, active_models: ActiveModels, indices_i: List[int], indices_j: List[int]) -> None: for i in indices_i: for j in indices_j: dist = self.metric(active_models.models[i].info, active_models.models[j].info) self._average_distance[i, j] = dist self._average_distance[j, i] = dist def create_precomputed_info(self, covariance: Covariance, data_X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: raise NotImplementedError class HellingerDistanceBuilder(DistanceBuilder): """HellingerDistanceBuilder builds distances based on the Hellinger distance between the model's Gram matrices. """ @staticmethod def metric(data_i, data_j, **kwargs) -> float: return HellingerDistanceBuilder.hellinger_distance(*data_i, *data_j, **kwargs) @staticmethod def hellinger_distance(log_det_i: np.ndarray, mini_gram_matrices_i: np.ndarray, log_det_j: np.ndarray, mini_gram_matrices_j: np.ndarray, tol: float = 0.02) -> float: """Hellinger distance between two multivariate Gaussian distributions with zero means zero. https://en.wikipedia.org/wiki/Hellinger_distance """ are_different = np.abs(log_det_i - log_det_j) > tol indices = np.arange(are_different.size) logdet_p_and_q = log_det_i.copy() for i in indices[are_different]: p_K = mini_gram_matrices_i[:, :, i] q_K = mini_gram_matrices_j[:, :, i] p_and_q_kernels = 0.5 * (p_K + q_K) chol_p_and_q = chol_safe(p_and_q_kernels, tol) logdet_p_and_q[i] = 2 * np.sum(np.log(np.diag(chol_p_and_q)), axis=0) # Compute log distance. log_det_sum = log_det_i + log_det_j log_hellinger = 0.25 * log_det_sum - 0.5 * logdet_p_and_q # Exponentiate. 
        hellinger = 1 - np.exp(log_hellinger)
        distance = np.mean(hellinger, axis=0)

        # for numerical stability, clip distance to [0, 1] before taking sqrt
        distance = np.clip(distance, 0, 1)
        distance = np.sqrt(distance)
        return float(distance)

    def create_precomputed_info(self,
                                covariance: Covariance,
                                data_X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        n = data_X.shape[0]
        tolerance = 1e-6

        log_det = np.full(self.num_samples, np.nan)
        mini_gram_matrices = np.full((n, n, self.num_samples), np.nan)

        cov_priors = get_priors(covariance.raw_kernel)
        hyperparameters = util.prior_sample(cov_priors, self.probability_samples)
        for i in range(hyperparameters.shape[0]):
            hyp = hyperparameters[i, :]
            lmbda = self.hyperparameter_data_noise_samples[i]

            covariance.raw_kernel[:] = hyp

            k = covariance.raw_kernel.K(data_X, data_X)
            k = k + lmbda * np.eye(k.shape[0])
            mini_gram_matrices[:, :, i] = k
            chol_k = chol_safe(k, tolerance)
            log_det[i] = 2 * np.sum(np.log(np.diag(chol_k)), axis=0)

        return log_det, mini_gram_matrices


class FrobeniusDistanceBuilder(DistanceBuilder):

    def __init__(self, noise_prior: Prior, num_samples: int, max_num_hyperparameters: int, max_num_kernels: int,
                 active_models: ActiveModels, initial_model_indices: List[int], data_X: np.ndarray):
        super().__init__(noise_prior, num_samples, max_num_hyperparameters, max_num_kernels, active_models,
                         initial_model_indices, data_X)

    @staticmethod
    def metric(data_i, data_j, **kwargs) -> float:
        return FrobeniusDistanceBuilder.frobenius_distance(data_i, data_j)

    @staticmethod
    def frobenius_distance(a: np.ndarray, b: np.ndarray) -> float:
        """Average Frobenius distance between a vs b."""
        distance = np.mean(np.sqrt(np.sum((a - b) ** 2, axis=0)))
        return float(distance)

    def create_precomputed_info(self,
                                covariance: Covariance,
                                data_X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        n = data_X.shape[0]
        vectors = np.full((n ** 2, self.num_samples), np.nan, dtype=np.float32)

        cov_priors = get_priors(covariance.raw_kernel)
        hyperparameters = util.prior_sample(cov_priors, self.probability_samples)

        for i in range(hyperparameters.shape[0]):
            hyp = hyperparameters[i, :]
            noise_var = self.hyperparameter_data_noise_samples[i]

            covariance.raw_kernel[:] = hyp

            prior_covariance = covariance.raw_kernel.K(data_X, data_X)
            prior_covariance += noise_var * np.eye(prior_covariance.shape[0])
            vectors[:, i] = prior_covariance.reshape(n * n).copy()

        return vectors


class CorrelationDistanceBuilder(DistanceBuilder):

    @staticmethod
    def metric(data_i, data_j, **kwargs) -> float:
        return CorrelationDistanceBuilder.correlation_distance(data_i, data_j)

    @staticmethod
    def correlation_distance(a: np.ndarray, b: np.ndarray) -> float:
        """Average correlation distance between a vs b."""
        a_mean = np.mean(a, axis=0)
        b_mean = np.mean(b, axis=0)
        a_centered = a - a_mean
        b_centered = b - b_mean
        # Batch dot product: one dot product per sample column of a and b
        dot_prod = np.einsum('ij,ji->i', a_centered.T, b_centered)
        a_norm = np.linalg.norm(a_centered, axis=0)
        b_norm = np.linalg.norm(b_centered, axis=0)
        correlation = dot_prod / (a_norm * b_norm)
        # For numerical stability, clip distance to [0, 1] before taking sqrt.
        correlation = np.clip(correlation, 0, 1)
        # Ordinally equivalent to the angular distance (arccos(correlation)).
        # See Metric distances derived from cosine similarity and Pearson and
        # Spearman correlations, Dongen & Enright (2012).
correlation_dist = np.sqrt(0.5 * (1 - correlation)) distance = np.mean(correlation_dist, axis=0) return float(distance) def create_precomputed_info(self, covariance: Covariance, data_X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: n = data_X.shape[0] vectors = np.full((n ** 2, self.num_samples), np.nan, dtype=np.float32) cov_priors = get_priors(covariance.raw_kernel) hyperparameters = util.prior_sample(cov_priors, self.probability_samples) for i in range(hyperparameters.shape[0]): hyp = hyperparameters[i, :] noise_var = self.hyperparameter_data_noise_samples[i] covariance.raw_kernel[:] = hyp prior_covariance = covariance.raw_kernel.K(data_X, data_X) prior_covariance += noise_var * np.eye(prior_covariance.shape[0]) vectors[:, i] = prior_covariance.reshape(n * n).copy() return vectors def fix_numerical_problem(k: np.ndarray, tolerance: float) -> np.ndarray: """ :param k: :param tolerance: :return: """ k = cov_nearest(k, threshold=tolerance) cholesky_k = np.linalg.cholesky(k).T return cholesky_k def chol_safe(k: np.ndarray, tolerance: float) -> np.ndarray: """Safe Cholesky decomposition. k: covariance matrix (n x n) """ try: cholesky_k = np.linalg.cholesky(k).T except LinAlgError: # Decomposition failed, k may not be positive-definite. # Try to recover by making the covariance matrix positive-definite. cholesky_k = fix_numerical_problem(k, tolerance) return cholesky_k
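# --- Hedged usage sketch (appended; not part of the original module) ---
# Illustrates the two static distance metrics on toy "vectorized Gram matrix"
# samples of shape (n*n, num_samples); the numbers are made up. Running this
# still requires the module's imports (GPy, statsmodels, src.autoks) to resolve.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    a = rng.rand(16, 5)  # five flattened 4x4 Gram matrices
    b = a + 0.1 * rng.rand(16, 5)
    print('Frobenius:', FrobeniusDistanceBuilder.frobenius_distance(a, b))
    print('Correlation:', CorrelationDistanceBuilder.correlation_distance(a, b))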
nilq/baby-python
python
#
# PySNMP MIB module H3C-OBJECT-INFO-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H3C-OBJECT-INFO-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:10:08 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
h3cCommon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "h3cCommon")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibIdentifier, IpAddress, Unsigned32, Integer32, Counter32, ObjectIdentity, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Gauge32, iso, TimeTicks, ModuleIdentity, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "IpAddress", "Unsigned32", "Integer32", "Counter32", "ObjectIdentity", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Gauge32", "iso", "TimeTicks", "ModuleIdentity", "Counter64")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
h3cObjectInfo = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55))
h3cObjectInfo.setRevisions(('2004-12-27 00:00',))
if mibBuilder.loadTexts: h3cObjectInfo.setLastUpdated('200412270000Z')
if mibBuilder.loadTexts: h3cObjectInfo.setOrganization(' Huawei 3Com Technologies Co., Ltd. ')
h3cObjectInformation = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 1))
h3cObjectInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 1, 1), )
if mibBuilder.loadTexts: h3cObjectInfoTable.setStatus('current')
h3cObjectInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 1, 1, 1), ).setIndexNames((0, "H3C-OBJECT-INFO-MIB", "h3cObjectInfoOID"), (0, "H3C-OBJECT-INFO-MIB", "h3cObjectInfoType"), (0, "H3C-OBJECT-INFO-MIB", "h3cObjectInfoTypeExtension"))
if mibBuilder.loadTexts: h3cObjectInfoEntry.setStatus('current')
h3cObjectInfoOID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 1, 1, 1, 1), ObjectIdentifier())
if mibBuilder.loadTexts: h3cObjectInfoOID.setStatus('current')
h3cObjectInfoType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("reserved", 1), ("accessType", 2), ("dataType", 3), ("dataRange", 4), ("dataLength", 5))))
if mibBuilder.loadTexts: h3cObjectInfoType.setStatus('current')
h3cObjectInfoTypeExtension = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 1, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 10)))
if mibBuilder.loadTexts: h3cObjectInfoTypeExtension.setStatus('current')
h3cObjectInfoValue = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 1, 1, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cObjectInfoValue.setStatus('current')
h3cObjectInfoMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 2))
h3cObjectInfoMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 2, 1))
h3cObjectInfoMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 2, 1, 1)).setObjects(("H3C-OBJECT-INFO-MIB", "h3cObjectInfoTableGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cObjectInfoMIBCompliance = h3cObjectInfoMIBCompliance.setStatus('current')
h3cObjectInfoMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 2, 2))
h3cObjectInfoTableGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 10, 2, 55, 2, 2, 1)).setObjects(("H3C-OBJECT-INFO-MIB", "h3cObjectInfoValue"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cObjectInfoTableGroup = h3cObjectInfoTableGroup.setStatus('current')
mibBuilder.exportSymbols("H3C-OBJECT-INFO-MIB", h3cObjectInfoEntry=h3cObjectInfoEntry, h3cObjectInfo=h3cObjectInfo, h3cObjectInfoTable=h3cObjectInfoTable, h3cObjectInfoType=h3cObjectInfoType, h3cObjectInfoValue=h3cObjectInfoValue, h3cObjectInfoMIBConformance=h3cObjectInfoMIBConformance, h3cObjectInformation=h3cObjectInformation, h3cObjectInfoTypeExtension=h3cObjectInfoTypeExtension, h3cObjectInfoTableGroup=h3cObjectInfoTableGroup, h3cObjectInfoMIBGroups=h3cObjectInfoMIBGroups, h3cObjectInfoMIBCompliances=h3cObjectInfoMIBCompliances, h3cObjectInfoOID=h3cObjectInfoOID, h3cObjectInfoMIBCompliance=h3cObjectInfoMIBCompliance, PYSNMP_MODULE_ID=h3cObjectInfo)
nilq/baby-python
python
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from datetime import datetime
import logging
import re

import github
from github.Issue import Issue
from github.IssueComment import IssueComment

from gerrit_to_github_issues import errors

LOG = logging.getLogger(__name__)


def construct_issue_list(match_list: list) -> list:
    new_list = []
    for issue in match_list:
        try:
            new_list.append(int(issue))
        except ValueError:
            LOG.warning(f'Value {issue} could not be converted to `int` type')
    return new_list


def parse_issue_number(commit_msg: str) -> dict:
    # Searches for Relates-To or Closes tags first to match and return
    LOG.debug(f'Parsing commit message: {commit_msg}')
    related = re.findall(r'(?<=Relates-To: #)(.*?)(?=\n)', commit_msg)
    LOG.debug(f'Captured related issues: {related}')
    closes = re.findall(r'(?<=Closes: #)(.*?)(?=\n)', commit_msg)
    LOG.debug(f'Captured closes issues: {closes}')
    if related or closes:
        return {
            'related': construct_issue_list(related),
            'closes': construct_issue_list(closes)
        }
    # If no Relates-To or Closes tags are defined, find legacy [#X] style tags
    LOG.debug('Falling back to legacy tags')
    legacy_matches = re.findall(r'(?<=\[#)(.*?)(?=\])', commit_msg)
    LOG.debug(f'Captured legacy issues: {legacy_matches}')
    if not legacy_matches:
        return {}
    return {
        'related': construct_issue_list(legacy_matches)
    }


def remove_duplicated_issue_numbers(issue_dict: dict) -> dict:
    if 'closes' in issue_dict:
        issue_dict['related'] = [x for x in issue_dict['related'] if x not in issue_dict['closes']]
    return issue_dict


def get_client(github_user: str, github_pw: str, github_token: str) -> github.Github:
    if github_token:
        return github.Github(github_token)
    if github_user and github_pw:
        return github.Github(github_user, github_pw)
    raise errors.GithubConfigurationError


def get_bot_comment(issue: Issue, bot_name: str, ps_number: str) -> IssueComment:
    for i in issue.get_comments():
        if i.user.login == bot_name and ps_number in i.body:
            return i


def assign_issues(repo: github.Repository):
    open_issues = [i for i in repo.get_issues() if i.state == 'open']
    for issue in open_issues:
        try_assign(issue)


def try_assign(issue: github.Issue):
    # find the most recent assignment request
    assignment_request = None
    for comment in issue.get_comments().reversed:
        if '/assign' in comment.body:
            assignment_request = comment
            break
    if not assignment_request:
        # Looks like no one wants this issue
        return
    if not issue.assignees:
        # If no one has been assigned yet, let the user take the issue
        issue.add_to_assignees(assignment_request.user)
        issue.create_comment(f'assigned {assignment_request.user.login}')
        return
    if issue_age(issue) > 30:
        # If the issue is a month (30 days) old and the original assignees
        # haven't closed it yet, let's assume that they've stopped working on
        # it and allow the new user to have this issue
        old_assignees = issue.assignees
        for assignee in old_assignees:
            issue.remove_from_assignees(assignee)
        issue.add_to_assignees(assignment_request.user)
        # Join the assignees' logins; the original joined the user objects
        # themselves, which str.join() rejects
        comment_body = f'unassigned: {", ".join([a.login for a in old_assignees])}\n' + \
                       f'assigned: {assignment_request.user.login}'
        issue.create_comment(comment_body)
        return
    # If we've made it here, a user has requested to be assigned to a non-stale
    # issue which is already assigned. Just notify the core team and let them
    # handle the conflict.
    comment_body = f'Unable to assign {assignment_request.user.login}. Please ' + \
                   f'contact a member of the @airshipit/airship-cores team for ' + \
                   f'help with assignments.'
    issue.create_comment(comment_body)


def issue_age(issue):
    return (datetime.now() - issue.created_at).days
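# --- Hedged usage sketch (appended; not part of the original module) ---
# parse_issue_number() is pure string handling, so it can be exercised
# without Gerrit or GitHub credentials (importing this module still needs the
# `github` package installed). The commit message below is made up.
if __name__ == '__main__':
    msg = 'Fix the widget\n\nRelates-To: #12\nCloses: #7\n'
    issues = remove_duplicated_issue_numbers(parse_issue_number(msg))
    print(issues)  # expected: {'related': [12], 'closes': [7]}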
nilq/baby-python
python
__author__ = 'Devesh Bajpai' ''' https://codeforces.com/problemset/problem/381/A Solution: This is very similar to the DP card game problem. Since the numbers are distinct, it avoids the complex case when both the ends are same and the player would pick the side which exposes the smaller number for next round. That would require a DP solution. But here we can just simulate the movement with 2 pointers and update the 2 players' points. ''' def solve(n, arr): is_serajas_turn = True s = 0 e = n - 1 seraja = 0 dima = 0 while s <= e: if arr[s] < arr[e]: this_round_point = arr[e] e -= 1 else: this_round_point = arr[s] s += 1 if is_serajas_turn: seraja += this_round_point else: dima += this_round_point is_serajas_turn = not is_serajas_turn return str(seraja) + " " + str(dima) if __name__ == "__main__": n = int(raw_input()) arr = map(int, raw_input().split(" ")) print solve(n, arr)
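# --- Hedged self-check (appended; not part of the original submission) ---
# Sample from Codeforces 381A: for n=4 and cards [4, 1, 2, 10],
# solve(4, [4, 1, 2, 10]) returns "12 5" (Sereja takes 10, 2 then 4+... = 12;
# Dima takes 4 and 1 = 5).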
nilq/baby-python
python
"""This script contains the main authentication and hash generation functions""" import subprocess from shadow_auth._internal.classes import ShadowHash from shadow_auth._internal.enums import Algorithm from shadow_auth._internal.validations import ( validate_system_requirements_first ) from shadow_auth._internal.exceptions import ( # Exceptions InvalidArgumentType, ValidateUserError, # Exception Messages MESSAGE_INVALID_ALGORITHM_TYPE, MESSAGE_INVALID_SALT_TYPE, MESSAGE_INVALID_TEXT_TYPE, MESSAGE_INVALID_USERNAME_TYPE, MESSAGE_INVALID_HASHED_PASSWORD_TYPE, MESSAGE_INVALID_PASSWORD_TYPE, MESSAGE_CANT_GENERATE_HASH ) def _generate_openssl_hash(algorithm: Algorithm, salt: str, text: str) -> str: """ Internal function that generates a Hash using the openssl program. :param algorithm: A valid hashing algorithm to be used :param salt: The salt added when generating the hash :param text: The text to be hashed :return: A hashed string :raises InvalidArgumentType: """ if not isinstance(algorithm, Algorithm): raise InvalidArgumentType(MESSAGE_INVALID_ALGORITHM_TYPE) if not isinstance(salt, str): raise InvalidArgumentType(MESSAGE_INVALID_SALT_TYPE) if not isinstance(text, str): raise InvalidArgumentType(MESSAGE_INVALID_TEXT_TYPE) result = subprocess.check_output( "echo {text} | openssl passwd -{algorithm} -salt {salt} -stdin".format( text=text, algorithm=algorithm.value, salt=salt ), shell=True ).decode("utf-8")[:-1] return result def _generate_random_openssl_hash() -> str: """Internal function that generates a random Hash using the openssl program.""" from random import choice from string import ascii_letters random_string: str = lambda size: ''.join(choice(ascii_letters) for x in range(size)) algorithm: Algorithm = choice([Algorithm.MD5, Algorithm.SHA_256, Algorithm.SHA_512]) salt: str = random_string(8) text: str = random_string(choice([5,6,7,8,9,10])) result = _generate_openssl_hash(algorithm=algorithm, salt=salt, text=text) return result def _generate_fake_user_hash(username: str) -> str: """Internal function that generates a fake reproducible Hash using the openssl program.""" hashed_username = str(abs(hash(username+"abcd"))) i = 0 while len(hashed_username) < 8: hashed_username = hashed_username + hashed_username[i] i += 1 salt_text = "" for letter_index in range(8): char_num =int(hashed_username[letter_index]) if char_num %2 == 0: salt_text = salt_text + chr(65 + char_num) else: salt_text = salt_text + chr(97 + char_num) return _generate_openssl_hash(algorithm=Algorithm.SHA_512, salt=salt_text, text=salt_text) def _get_user_password_hash_from_shadow_file(username: str) -> str: """ Internal function that retrieves the password hash from a Linux user. If the user does not exist a fake result is returned as a safety measure. :param username: A valid hashing algorithm to be used :return: A the hashed password string :raises InvalidArgumentType: """ if not isinstance(username, str): raise InvalidArgumentType(MESSAGE_INVALID_USERNAME_TYPE) try: result = subprocess.check_output( "cat /etc/shadow | grep {user}".format(user=username), shell=True ).decode("utf-8").split(":")[1] return result except subprocess.CalledProcessError: return _generate_random_openssl_hash() @validate_system_requirements_first def generate_openssl_hash(algorithm: Algorithm, salt: str, text: str) -> str: """ Generates a Hash using the openssl program. 
:param algorithm: A valid hashing algorithm to be used :param salt: The salt added when generating the hash :param text: The text to be hashed :return: A hashed string :raises PrerequisiteException, InvalidArgumentType: """ if not isinstance(algorithm, Algorithm): raise InvalidArgumentType(MESSAGE_INVALID_ALGORITHM_TYPE) if not isinstance(salt, str): raise InvalidArgumentType(MESSAGE_INVALID_SALT_TYPE) if not isinstance(text, str): raise InvalidArgumentType(MESSAGE_INVALID_TEXT_TYPE) result = _generate_openssl_hash(algorithm=algorithm, salt=salt, text=text) return result @validate_system_requirements_first def validate_with_hash(username: str, hashed_password: str) -> bool: """ Validates the given credentials for a user in the system using a hashed password. A random hash is used to compare the provided hash as a safety measure if the user does not exist, has a blank password, or the account is disabled. :param username: The user to be validated in the system :param hashed_password: The password hash to be used to compare the credentials :return: true if credentials are valid, false if they are not. :raises PrerequisiteException, InvalidArgumentType, InvalidArgumentFormat: """ if not isinstance(username, str): raise InvalidArgumentType(MESSAGE_INVALID_USERNAME_TYPE) if not isinstance(hashed_password, str): raise InvalidArgumentType(MESSAGE_INVALID_HASHED_PASSWORD_TYPE) if len(hashed_password.split("$")) != 4: return False user_hash = _get_user_password_hash_from_shadow_file(username) if (user_hash == "") or ("!" in user_hash) or ("*" in user_hash) or ("$" not in user_hash): user_hash = _generate_random_openssl_hash() shadow_object = ShadowHash(hashed_password) return shadow_object.equals(user_hash) @validate_system_requirements_first def validate_with_password(username: str, password: str) -> bool: """ Validates the given credentials for a user in the system using a string password. A random hash is used to compare the provided password as a safety measure if the user does not exist, has a blank password, or the account is disabled. :param username: The user to be validated in the system :param password: The password to be used to compare the credentials :return: true if credentials are valid, false if they are not :raises PrerequisiteException, InvalidArgumentType, InvalidArgumentFormat, ValidateUserError: """ if not isinstance(username, str): raise InvalidArgumentType(MESSAGE_INVALID_USERNAME_TYPE) if not isinstance(password, str): raise InvalidArgumentType(MESSAGE_INVALID_PASSWORD_TYPE) user_hash = _get_user_password_hash_from_shadow_file(username) if (user_hash == "") or ("!" in user_hash) or ("*" in user_hash) or ("$" not in user_hash): user_hash = _generate_random_openssl_hash() shadow_object = ShadowHash(user_hash) if shadow_object.algorithm not in [enum.value for enum in Algorithm]: raise ValidateUserError(MESSAGE_CANT_GENERATE_HASH) return shadow_object.equals(_generate_openssl_hash( algorithm=Algorithm(shadow_object.algorithm), salt=shadow_object.salt, text=password) ) @validate_system_requirements_first def get_password_info(username: str) -> dict: """ Returns the type of algorithm and salt of a user. A fake result is returned as a safety measure if the user does not exist, has a blank password, or the account is disabled. 
:param username: The user in the system :return: {"algorithm": "xxxx", "salt": "xxxx"} :raises PrerequisiteException, InvalidArgumentType: """ if not isinstance(username, str): raise InvalidArgumentType(MESSAGE_INVALID_USERNAME_TYPE) try: user_hash = subprocess.check_output( "cat /etc/shadow | grep {user}".format(user=username), shell=True ).decode("utf-8").split(":")[1] except subprocess.CalledProcessError: user_hash = _generate_fake_user_hash(username) if (user_hash == "") or ("!" in user_hash) or ("*" in user_hash) or ("$" not in user_hash): user_hash = _generate_fake_user_hash(username) split_hash = user_hash.split("$") return {"algorithm": split_hash[1], "salt": split_hash[2]}
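# --- Hedged illustration (appended; not part of the original module) ---
# The /etc/shadow hash format this module parses is "$<algorithm>$<salt>$<hash>".
# The value below is made up; it only demonstrates the split performed in
# get_password_info(). Importing this module still requires the package's
# _internal helpers to be available.
if __name__ == '__main__':
    fake_hash = '$6$WsmtGHhU$abcdefghijklmnop'
    _, algorithm, salt, digest = fake_hash.split('$')
    print({'algorithm': algorithm, 'salt': salt})  # {'algorithm': '6', 'salt': 'WsmtGHhU'}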
nilq/baby-python
python
""" Some simple logging functionality, inspired by rllab's logging. Logs to a tab-separated-values file (path/to/output_directory/progress.txt) """ import atexit import json import os import os.path as osp import shutil import sys import time import warnings from collections import defaultdict from pathlib import Path from typing import Dict import joblib import numpy as np import pandas as pd import torch from box_pusher.agents.utils.mpi_tools import mpi_statistics_scalar, proc_id from box_pusher.agents.utils.serialization_utils import convert_json from torch.utils import tensorboard from torch.utils.tensorboard import SummaryWriter color2num = dict( gray=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37, crimson=38, ) def colorize(string, color, bold=False, highlight=False): """ Colorize a string. This function was originally written by John Schulman. """ attr = [] num = color2num[color] if highlight: num += 10 attr.append(str(num)) if bold: attr.append("1") return "\x1b[%sm%s\x1b[0m" % (";".join(attr), string) class Logger: def __init__( self, workspace: str, exp_name="default", tensorboard=True, clear_out=False ) -> None: # self.data_dict = defaultdict(list) self.tagged_data = {} self.raw_values_keys = ( set() ) # set of keys for values that don't need statistics computed self.stats = {} self.tb_writer: SummaryWriter = None self.tensorboard = tensorboard self.workspace = workspace self.exp_path = osp.join(workspace, exp_name) self.clear_out = clear_out self.log_path = osp.join(self.exp_path, "logs") self.model_path = osp.join(self.exp_path, "models") self.raw_log_file = osp.join(self.exp_path, "raw.csv") self.headers = [] if proc_id() == 0: Path(self.workspace).mkdir(parents=True, exist_ok=True) if clear_out: if osp.exists(self.exp_path): shutil.rmtree(self.exp_path, ignore_errors=True) Path(self.exp_path).mkdir(parents=True, exist_ok=True) Path(self.model_path).mkdir(parents=True, exist_ok=True) if self.tensorboard: self.tb_writer = SummaryWriter(log_dir=self.log_path) def close(self): if proc_id() == 0 and self.tb_writer is not None: self.tb_writer.close() def setup_pytorch_saver(self, model): """ setup saver so logger has a reference to what needs to be saved. Makeslogger a little more efficient and avoids the caller having to deal with proc ids """ self.model = model def save_config(self, config: Dict, verbose=2): """ save configuration of experiments to the experiment directory """ if proc_id() == 0: config_path = osp.join(self.exp_path, "config.json") config_json = convert_json(config) output = json.dumps(config_json, indent=2, sort_keys=True) if verbose > 1: self.print("Saving config:\n", color="cyan", bold=True) if verbose > 1: self.print(output) with open(config_path, "w") as out: out.write(output) def save_model(self, name): """ save the model """ if proc_id() == 0: torch.save(self.model.state_dict(), osp.join(self.model_path, name)) def print(self, msg, file=sys.stdout, color="", bold=False): """ print to terminal, stdout by default. Ensures only the main process ever prints. """ if proc_id() == 0: if color == "": print(msg, file=file) else: print(colorize(msg, color, bold=bold), file=file) sys.stdout.flush() def store(self, tag="default", value_only=False, **kwargs): """ store some scalar value to a key, which is accumulated until logged. if value_only is True, then when printing/logging this data, no statistics aggregation is done. 
Expect only one worker to ever call store with value_only=True """ if tag not in self.tagged_data: self.tagged_data[tag] = defaultdict(list) data_dict = self.tagged_data[tag] for k, v in kwargs.items(): data_dict[k].append(v) if value_only == True: self.raw_values_keys.add(f"{tag}/{k}") def get_statistics(self): return self.stats def pretty_print_table(self, data): if proc_id() == 0: vals = [] key_lens = [len(key) for key in data.keys()] max_key_len = max(15, max(key_lens)) keystr = "%" + "%d" % max_key_len fmt = "| " + keystr + "s | %15s |" n_slashes = 22 + max_key_len print("-" * n_slashes) for key in data.keys(): val = data[key] valstr = "%8.3g" % val if hasattr(val, "__float__") else val print(fmt % (key, valstr)) vals.append(val) print("-" * n_slashes, flush=True) def log(self, step): """ log accumulated data to tensorboard if enabled and to the terminal and locally. Also syncs collected data across processes Statistics are then retrievable as a dict via get_statistics """ # if val is not None: # super().log_tabular(key, val) # else: for tag in self.tagged_data.keys(): data_dict = self.tagged_data[tag] for k, v in data_dict.items(): vals = ( np.concatenate(v) if isinstance(v[0], np.ndarray) and len(v[0].shape) > 0 else v ) if f"{tag}/{k}" not in self.raw_values_keys: stats = mpi_statistics_scalar(vals, with_min_and_max=True) avg, std, minv, maxv = stats[0], stats[1], stats[2], stats[3] key_vals = { f"{tag}/{k}_avg": avg, f"{tag}/{k}_std": std, f"{tag}/{k}_min": minv, f"{tag}/{k}_max": maxv, } else: if isinstance(v, list): if len(v) == 1: vals = v[0] else: vals = np.array(v) key_vals = { f"{tag}/{k}": vals, } if proc_id() == 0: for name, scalar in key_vals.items(): if self.tensorboard: self.tb_writer.add_scalar(name, scalar, step) self.stats[name] = scalar if proc_id() == 0: if not osp.isfile(self.raw_log_file): with open(self.raw_log_file, "w") as f: self.headers = [] for h in sorted(list(self.stats.keys())): self.headers.append(h) f.write(",".join(self.headers) + "\n") new_headers = False for k in self.stats.keys(): if k not in self.headers: self.headers.append(k) new_headers = True if new_headers: os.rename(self.raw_log_file, self.raw_log_file + ".temp") orig_contents = [] with open(self.raw_log_file + ".temp", "r") as f: orig_contents = f.readlines() with open(self.raw_log_file, "w") as f: f.write(",".join(self.headers) + "\n") f.write("".join(orig_contents[1:])) os.remove(self.raw_log_file + ".temp") with open(self.raw_log_file, "a") as f: vals = [] for h in self.headers: if h in self.stats: vals.append(str(self.stats[h])) else: vals.append("") f.write(",".join(vals) + "\n") def reset(self): """ call this each time after log is called """ for tag in self.tagged_data.keys(): self.tagged_data[tag] = defaultdict(list) self.stats = {}
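# --- Hedged usage sketch (appended; not part of the original module) ---
# colorize() itself has no external dependencies, though importing this module
# still requires torch and the box_pusher utilities to be installed.
if __name__ == "__main__":
    print(colorize("checkpoint saved", "green", bold=True))
    print(colorize("low reward warning", "yellow", highlight=True))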
nilq/baby-python
python
# train.py
### command> python train.py --fold 0 --model decision_tree_gini
import argparse
import os

import joblib
import pandas as pd
from sklearn import metrics

import config
import dispatcher


def run(fold, model):
    # read the training data with folds
    df = pd.read_csv(config.TRAINING_FILE)

    # training data is where kfold is not equal to provided fold
    # also, note that we reset the index
    df_train = df[df.kfold != fold].reset_index(drop=True)

    # validation data is where kfold is equal to provided fold
    df_valid = df[df.kfold == fold].reset_index(drop=True)

    # drop the label column from dataframe and convert it to
    # a numpy array by using .values.
    # target is label column in the dataframe
    x_train = df_train.drop("target", axis=1).values
    y_train = df_train.target.values

    # similarly, for validation, we have
    x_valid = df_valid.drop("target", axis=1).values
    y_valid = df_valid.target.values

    # fetch the model from model_dispatcher
    clf = dispatcher.model[model]

    # fit the model on training data
    clf.fit(x_train, y_train)

    # create predictions for validation samples
    preds = clf.predict(x_valid)

    # calculate & print accuracy
    accuracy = metrics.accuracy_score(y_valid, preds)
    print(f"Fold={fold}, Accuracy={accuracy}")

    # save the model; include the model name in the file name so different
    # models do not overwrite each other's folds (the original always wrote
    # dt_{fold}.bin)
    joblib.dump(
        clf,
        os.path.join(config.MODEL_OUTPUT, f"{model}_{fold}.bin")
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--fold", type=int
    )
    parser.add_argument(
        "--model", type=str
    )

    args = parser.parse_args()

    run(
        fold=args.fold, model=args.model
    )
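# --- Hedged sketch (appended; not part of the original script) ---
# The `dispatcher` module is not shown in this snippet; run() only assumes it
# exposes a `model` dict mapping names to scikit-learn estimators, e.g.:
#
#   # dispatcher.py (assumed shape)
#   from sklearn import tree
#   model = {
#       "decision_tree_gini": tree.DecisionTreeClassifier(criterion="gini"),
#       "decision_tree_entropy": tree.DecisionTreeClassifier(criterion="entropy"),
#   }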
nilq/baby-python
python
from base64 import b64encode
import jinja2
import json
import os
import yaml

import kubernetes.config
import kubernetes.client

from simpleflow.utils import json_dumps


class KubernetesJob(object):
    def __init__(self, job_name, domain, response):
        self.job_name = job_name
        self.response = response
        self.domain = domain

    def load_config(self):
        """
        Load config in the current Kubernetes cluster, either via in cluster
        config or via the local kube config if on a development machine.
        """
        try:
            kubernetes.config.load_incluster_config()
        except kubernetes.config.ConfigException:
            kubernetes.config.load_kube_config()

    def compute_job_definition(self):
        """
        Compute a job definition from the SWF response
        """
        # extract job template location
        input = self.response.get("input")
        if not input:
            raise ValueError("Cannot extract job template from empty input")
        meta = json.loads(input).get("meta")
        if not meta:
            raise ValueError("Cannot extract 'meta' key from task input")
        job_template = meta["k8s_job_template"]

        # setup variables that will be interpolated in the template
        variables = dict(os.environ)
        # iterate over key/value pairs; iterating the dict directly would
        # yield only keys and fail to unpack into (key, value)
        for key, value in meta.get("k8s_job_data", {}).items():
            variables[key] = value
        variables["JOB_NAME"] = self.job_name
        variables["PAYLOAD"] = b64encode(json_dumps(self.response))

        # render the job template with those context variables
        path, filename = os.path.split(job_template)
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(path or './'),
            undefined=jinja2.StrictUndefined,
        )
        rendered = env.get_template(filename).render(variables)

        # safe_load avoids executing arbitrary YAML tags from the template
        return yaml.safe_load(rendered)

    def schedule(self):
        """
        Schedule a job from the given job template. See example of it here:
        https://github.com/kubernetes-incubator/client-python/blob/master/examples/create_deployment.py
        """
        # build job definition
        job_definition = self.compute_job_definition()

        # load cluster config
        self.load_config()

        # schedule job
        api = kubernetes.client.BatchV1Api()
        namespace = os.getenv("K8S_NAMESPACE", "default")
        api.create_namespaced_job(body=job_definition, namespace=namespace)
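# --- Hedged sketch (appended; not part of the original module) ---
# compute_job_definition() renders a Jinja2 template with JOB_NAME and a
# base64-encoded PAYLOAD. A minimal template it could consume (file name and
# image are assumptions) looks like:
#
#   # job.yaml.j2
#   apiVersion: batch/v1
#   kind: Job
#   metadata:
#     name: "{{ JOB_NAME }}"
#   spec:
#     template:
#       spec:
#         containers:
#           - name: worker
#             image: my-worker:latest
#             env:
#               - name: SWF_PAYLOAD
#                 value: "{{ PAYLOAD }}"
#         restartPolicy: Never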
nilq/baby-python
python
# coding: utf-8
from bs4 import BeautifulSoup
import requests
from urllib.parse import urljoin
import json


def main():
    '''
    Crawl the base URL of each city and save the results
    to a JSON file named url.json.
    '''
    url_json = {}
    url = "http://www.tianqihoubao.com/aqi/"
    headers = {'user-agent': 'my-app/0.0.1'}
    response = requests.get(url, headers=headers)
    response.encoding = "gbk"
    html = response.text
    soup = BeautifulSoup(html, "lxml")
    dls = soup.find_all("dl")
    for dl in dls:
        dts = dl.find_all("dt")
        for dt in dts:
            prov_name = dt.get_text()
            url_json[prov_name] = {}
        dds = dl.find_all("dd")
        for dd in dds:
            aas = dd.find_all("a", href=True)
            for aa in aas:
                city_name = aa.get_text().strip()
                city_url = urljoin(url, aa["href"])
                url_json[prov_name][city_name] = city_url

    # with open("url.json", "w", encoding='utf-8') as f:
    #     # indent is very handy: it pretty-prints the saved dict;
    #     # it defaults to None, and values below 0 mean zero spaces
    #     f.write(json.dumps(url_json, indent=4))

    with open("url.json", "w", encoding='utf-8') as f:
        json.dump(url_json, f, ensure_ascii=False, indent=4)


if __name__ == "__main__":
    main()
nilq/baby-python
python
#!/usr/bin/env python3 # -*- coding: UTF-8 -*- import unittest from httpglob import httpglob, path_match class PathMatchCase(unittest.TestCase): def test_010_path_match(self): self.assertTrue(path_match('/v1.1.1/image_1.1.1.zip', '/v1.1.1/image_1.1.1.zip')) def test_020_path_match(self): self.assertTrue(path_match('/v1.1.1/image_1.1.1.zip', '/v1.1.1/image_1.1.?.zip')) def test_030_path_match(self): self.assertFalse(path_match('/v1.1.1/image_1.1.1.zip', '/v1.1.1/image_1.2.?.zip')) class HTTPGlobCase(unittest.TestCase): def test_openssl(self): httpglob('https://www.openssl.org/source/old/*/openssl-1.?.*.tar.gz') if __name__ == '__main__': unittest.main()
nilq/baby-python
python
from django.utils.translation import ugettext_lazy as _ SERVICE_TYPES = ( ("HKI_MY_DATA", _("HKI_MY_DATA")), ("BERTH", _("BERTH")), ("YOUTH_MEMBERSHIP", _("YOUTH_MEMBERSHIP")), ("GODCHILDREN_OF_CULTURE", _("GODCHILDREN_OF_CULTURE")), )
nilq/baby-python
python
# Betül İNCE - 180401020 with open("veriler.txt", "r+") as data: cases = [] for line in data: cases.append(int(line)) size = len(cases) sum_cases = sum(cases) def first_order_polynomial(): n = len(cases) sum_of_x = 0 sum_of_y = sum(cases) sum_of_xiyi = 0 sum_of_xi_square = 0 for i in range(n): sum_of_x += i+1 sum_of_xiyi += (i+1)*cases[i] sum_of_xi_square += (i+1)*(i+1) a1 = (n*sum_of_xiyi - sum_of_x*sum_of_y)/(n*sum_of_xi_square - sum_of_x**2) a0 = (sum_of_y - a1*sum_of_x)/n #print(a0,a1) for i in range(n): print( cases[i], a0+a1*(i+1)) #print("first order polynomial--cases and the values that we found:") #first_order_polynomial() def polynominal(d): x_list = [] size = d + 1 matrix = [[0 for i in range(d + 1)] for j in range(d + 1)] for i in range(len(cases)): x_list.append(i + 1) for i in range(size): for j in range(size): for x in x_list: matrix[i][j] += pow(x, i + j) for i in range(size): sum_of_xy = 0 for j in x_list: sum_of_xy += cases[j - 1] * pow(j, i) matrix[i].append(sum_of_xy) return matrix def solution_with_gauss(matrix): n = len(matrix) for i in range(0, n): maxCol = abs(matrix[i][i]) maxRow = i for j in range(i + 1, n): if abs(matrix[j][i]) > maxCol: maxCol = abs(matrix[j][i]) maxRow = j for j in range(i, n + 1): temp = matrix[maxRow][j] matrix[maxRow][j] = matrix[i][j] matrix[i][j] = temp for j in range(i + 1, n): c = -matrix[j][i] / matrix[i][i] for k in range(i, n + 1): if i == k: matrix[j][k] = 0 else: matrix[j][k] += c * matrix[i][k] x = [0 for i in range(n)] for i in range(n - 1, -1, -1): x[i] = matrix[i][n] / matrix[i][i] for j in range(i - 1, -1, -1): matrix[j][n] -= matrix[j][i] * x[i] return x def correlation(comp_list): sr = 0 st = 0 yavg = sum_cases / size for i in range(size): sr += (cases[i] - comp_list[i]) ** 2 for i in range(size): st += (cases[i] - yavg) ** 2 square_r = ((st - sr) / st) r = square_r ** (0.5) return r def found_values(): correlation_values = [] x_list = [] for i in range(len(cases)): x_list.append(i + 1) print(x_list) for i in range(1, 7): comp_list = [] matrix = polynominal(i) coef = solution_with_gauss(matrix) sum = 0 for i in x_list: for j in range(len(coef)): sum += coef[j] * (i ** j) comp_list.append(sum) sum = 0 correlation_values.append(correlation(comp_list)) return correlation_values found_values = found_values() best_correlation=sorted(found_values)[-1] with open("sonuc.txt", "w") as file: for d in range(1, 7): matrix = polynominal(d) coef = solution_with_gauss(matrix) file.write("correlation value of {}. polynom: ".format(d)) file.write(str(found_values[d-1])+"\n") file.write("while approaching {}. polynomial:\n".format(d)) for c in range(len(coef)): file.write(" ") file.write("a{} = ".format(c)) file.write(str(coef[c]) + "\n") file.write("\n") file.write("best polynom is {}".format(found_values.index(best_correlation)+1))
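# --- Hedged cross-check (appended; not part of the original submission) ---
# solution_with_gauss() should agree with numpy's solver on the same augmented
# normal-equation matrix built by polynominal(). Requires numpy and, like the
# rest of the script, a readable veriler.txt.
if __name__ == "__main__":
    import numpy as np
    m = polynominal(2)
    A = np.array([row[:-1] for row in m], dtype=float)
    b = np.array([row[-1] for row in m], dtype=float)
    gauss = solution_with_gauss([row[:] for row in m])  # copy: it mutates
    print("gauss matches numpy:", np.allclose(gauss, np.linalg.solve(A, b)))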
nilq/baby-python
python
#NAME: mappingLoadTest.py #AUTH: Ryan McCartney, EEE Undergraduate, Queen's University Belfast #DESC: Loading Map from CSV file test #COPY: Copyright 2019, All Rights Reserved, Ryan McCartney import cv2 as cv from mapping import Mapping import time #Initialise Mapping map = Mapping(0.1,40,60) print('INFO: Mapping initialised.') #Load Map from File start = time.time() mapLocation = 'data/maps/mapTest1.csv' map.loadMap(mapLocation) end = time.time() delay = end - start text = 'INFO: %.2fs taken to load map.'%round(delay,2) print(text) while 1: #Show map in Window cv.imshow('Global Map',map.getViewableMap()) #Quit program when 'q' key is pressed if cv.waitKey(1) & 0xFF == ord('q'): break cv.destroyAllWindows()
nilq/baby-python
python
""" .. module:: CClassifierLogistic :synopsis: Logistic Regression (aka logit, MaxEnt) classifier .. moduleauthor:: Battista Biggio <battista.biggio@unica.it> .. moduleauthor:: Ambra Demontis <ambra.demontis@unica.it> """ from sklearn.linear_model import LogisticRegression from secml.array import CArray from secml.ml.classifiers import CClassifierLinear from secml.ml.classifiers.loss import CLossLogistic from secml.ml.classifiers.regularizer import CRegularizerL2 from secml.ml.classifiers.gradients import \ CClassifierGradientLogisticMixin class CClassifierLogistic(CClassifierLinear, CClassifierGradientLogisticMixin): """Logistic Regression (aka logit, MaxEnt) classifier. Parameters ---------- C : float, optional Penalty parameter C of the error term. Default 1.0. max_iter : int, optional Maximum number of iterations taken for the solvers to converge. Default 100. random_state : int, RandomState or None, optional The seed of the pseudo random number generator to use when shuffling the data. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Default None. preprocess : CPreProcess or str or None, optional Features preprocess to be applied to input data. Can be a CPreProcess subclass or a string with the type of the desired preprocessor. If None, input data is used as is. Attributes ---------- class_type : 'logistic' """ __class_type = 'logistic' _loss = CLossLogistic() _reg = CRegularizerL2() def __init__(self, C=1.0, max_iter=100, random_state=None, preprocess=None): CClassifierLinear.__init__(self, preprocess=preprocess) self.C = C self.max_iter = max_iter self.random_state = random_state @property def max_iter(self): return self._max_iter @property def random_state(self): return self._random_state @max_iter.setter def max_iter(self, value): self._max_iter = int(value) @random_state.setter def random_state(self, value): self._random_state = value @property def C(self): """Penalty parameter C of the error term.""" return self._C @C.setter def C(self, value): """Set the penalty parameter C of the error term. Parameters ---------- value : float Penalty parameter C of the error term. """ self._C = float(value) def _init_clf(self): self._sklearn_clf = LogisticRegression( penalty='l2', dual=False, tol=0.0001, C=self._C, fit_intercept=True, intercept_scaling=1.0, class_weight=None, solver='liblinear', random_state=self._random_state, max_iter=self._max_iter, multi_class='ovr', verbose=0, warm_start=False, ) def _fit(self, dataset): """Trains the One-Vs-All Logistic classifier. The following is a private method computing one single binary (2-classes) classifier of the OVA schema. Representation of each classifier attribute for the multiclass case is explained in corresponding property description. Parameters ---------- dataset : CDataset Binary (2-classes) training set. Must be a :class:`.CDataset` instance with patterns data and corresponding labels. Returns ------- trained_cls : classifier Instance of the used solver trained using input dataset. """ self._init_clf() self._sklearn_clf.fit(dataset.X.get_data(), dataset.Y.tondarray()) self._w = CArray( self._sklearn_clf.coef_, tosparse=dataset.issparse).ravel() self._b = CArray(self._sklearn_clf.intercept_[0])[0] return self
nilq/baby-python
python
import csv from io import StringIO, BytesIO import pandas as pd from des.models import DynamicEmailConfiguration from django.conf import settings from django.core.mail import EmailMultiAlternatives from django.test import Client from django.urls import reverse from django_rq import job from scripts.integration_test import IntegrationTest from series_tiempo_ar_api.apps.dump.models import DumpFile from series_tiempo_ar_api.apps.management.models import IntegrationTestTask, IntegrationTestConfig from series_tiempo_ar_api.libs.indexing.api_index_enqueue import enqueue_job_with_timeout class DjangoSeriesFetcher: def __init__(self): self.client = Client() def fetch(self, series_id, **kwargs): data = {'ids': series_id, 'format': 'csv'} data.update(kwargs) response = self.client.get(reverse('api:series:series'), data=data) if response.status_code != 200: return None out_stream = StringIO(str(response.content, encoding='utf8')) return pd.read_csv(out_stream, parse_dates=['indice_tiempo'], index_col='indice_tiempo') def get_url(self, serie_id: str): endpoint = IntegrationTestConfig.get_solo().api_endpoint return f'{endpoint}?ids={serie_id}&last=1000&format=csv' @job("integration_test", timeout=-1) def run_integration(task: IntegrationTestTask = None): task = task or IntegrationTestTask.objects.create() metadata = DumpFile.objects.filter(node=None, file_type=DumpFile.TYPE_CSV, file_name=DumpFile.FILENAME_METADATA).last() if not metadata: task.log("No se encontró un dump de metadatos generado en la aplicación.") task.refresh_from_db() task.status = IntegrationTestTask.FINISHED task.save() return series_metadata = pd.read_csv(BytesIO(metadata.file.read()), index_col='serie_id') setattr(settings, "ALLOWED_HOSTS", ["*"]) result = IntegrationTest(series_metadata=series_metadata, fetcher=DjangoSeriesFetcher()).test() task.log(str(result)) if result: send_email(result, task) task.refresh_from_db() task.status = IntegrationTestTask.FINISHED task.save() def send_email(result: list, task: IntegrationTestTask): subject = u'[{}] API Series de Tiempo: Test de integración'.format(settings.ENV_TYPE) emails = IntegrationTestConfig.get_solo().recipients.values_list('email', flat=True) if not emails: task.log("No hay usuarios registrados para recibir los reportes del test. Mail no enviado.") return msg = "Errores en los datos de las series detectados. Ver el archivo adjunto" config = DynamicEmailConfiguration.get_solo() mail = EmailMultiAlternatives(subject, msg, from_email=config.from_email, to=emails) mail.attach('errors.csv', generate_errors_csv(result), 'text/csv') sent = mail.send() if not sent: task.log("Error mandando el reporte") def generate_errors_csv(result: list): out = StringIO() writer = csv.DictWriter(out, fieldnames=["serie_id", "error_pct", "api_url", "distribution_url"]) writer.writeheader() writer.writerows(result) out.seek(0) return out.read() @job("integration_test") def enqueue_new_integration_test(*_): timeout = IntegrationTestConfig.get_solo().timeout enqueue_job_with_timeout('integration_test', run_integration, timeout)
nilq/baby-python
python
import os
import sys

rszdir = "/home/inopia/webapps/mlfw_media/f/rsz/"

l = os.listdir(rszdir)
l.sort()
for imagefile in l:
    # Filenames look like "mlfw<id>[_suffix].<ext>".  Note that
    # str.lstrip("mlfw") strips any leading run of the characters
    # m/l/f/w, not the literal prefix -- it works here because the id
    # that follows is numeric.
    part = imagefile.lstrip("mlfw").partition(".")
    ext = part[2].lower()
    if part[0] in ("save", "png"):
        continue
    try:
        iid = int(part[0].partition("_")[0])
    except ValueError:
        # Unexpected filename: report it and bail out for inspection.
        print(imagefile)
        sys.exit(1)
    if ext == "png" and iid > 1535:
        # Recompress newer PNGs in place with pngout.
        os.system("~/pngout-static " + rszdir + imagefile)
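# --- alternative helper (not part of the original script) -------------------
# str.lstrip("mlfw") strips characters, not the literal prefix.  On
# Python 3.9+ a safer parse uses str.removeprefix; this hypothetical helper
# sketches the idea.
def image_id(filename):
    stem = filename.removeprefix("mlfw").partition(".")[0]
    return int(stem.partition("_")[0])

# e.g. image_id("mlfw1536_tag.png") == 1536  (made-up filename)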
nilq/baby-python
python
import os

from d3m import utils

D3M_API_VERSION = 'v2020.1.9'
VERSION = "1.0.0"
TAG_NAME = "{git_commit}".format(
    git_commit=utils.current_git_commit(os.path.dirname(__file__)))

REPOSITORY = "https://github.com/brekelma/dsbox_graphs"
PACKAGE_NAME_GRAPHS = "dsbox-graphs"

D3M_PERFORMER_TEAM = 'ISI'

if TAG_NAME:
    PACKAGE_URI_GRAPHS = "git+" + REPOSITORY + "@" + TAG_NAME
else:
    PACKAGE_URI_GRAPHS = "git+" + REPOSITORY

PACKAGE_URI_GRAPHS = PACKAGE_URI_GRAPHS + "#egg=" + PACKAGE_NAME_GRAPHS

INSTALLATION_TYPE = 'GIT'

if INSTALLATION_TYPE == 'PYPI':
    INSTALLATION = {
        "type": "PIP",
        "package": PACKAGE_NAME_GRAPHS,
        "version": VERSION
    }
else:
    INSTALLATION = {
        "type": "PIP",
        "package_uri": PACKAGE_URI_GRAPHS,
    }
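# --- sanity check (not part of the original module) -------------------------
# Hedged illustration of what the constants above resolve to; the commit
# hash depends on the local checkout.
if __name__ == '__main__':
    # e.g. git+https://github.com/brekelma/dsbox_graphs@<commit>#egg=dsbox-graphs
    print(PACKAGE_URI_GRAPHS)
    print(INSTALLATION)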
nilq/baby-python
python
#!/usr/bin/python3
import pickle
import sys

import numpy as np
from scipy.stats import ks_2samp


def collect_stdevs(data_per_experiment):
    """Return the standard deviation of each run's fitness values.

    `data_per_experiment` maps an experiment name to a list of runs,
    each run being a list of numeric fitness values.
    """
    stdevs = []
    for data in data_per_experiment.values():
        for values in data:
            if values is None:
                continue
            stdevs.append(np.std(values))
    return stdevs


if __name__ == "__main__":
    if len(sys.argv) < 3:
        print("Usage: %s [max min diff dat] [stdev dat]" % (sys.argv[0]))
        sys.exit(1)

    with open(sys.argv[1], "rb") as f:
        maxmindata = pickle.load(f)
    with open(sys.argv[2], "rb") as f:
        stdevdata = pickle.load(f)

    maxminstdevs = collect_stdevs(maxmindata)
    stdevstdevs = collect_stdevs(stdevdata)

    # H0: there is no significant difference between the max-min-diff and
    # stdev fitness stdev distributions for each protein.
    confidence = 0.05
    ks = ks_2samp(maxminstdevs, stdevstdevs)
    print("max min diff - stdev: stdevs ks: %f" % (ks[1]))
    if ks[1] < confidence:
        print("significant")
    else:
        print("not significant")
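# --- data-shape sketch (not part of the original script) --------------------
# Hedged illustration of the pickle layout collect_stdevs() expects:
# experiment name -> list of runs -> list of numeric fitness values.
# The names and numbers below are invented.
def _example():  # pragma: no cover
    example = {"experiment_a": [[1, 2, 3], [2, 2, 4]],
               "experiment_b": [[5, 5, 6]]}
    return collect_stdevs(example)  # -> one stdev per run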
nilq/baby-python
python
# Generated by Django 3.0.1 on 2020-06-14 03:19

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('articles', '0016_auto_20200607_1153'),
    ]

    operations = [
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('short_reason', models.CharField(max_length=255)),
                ('long_reason', models.TextField()),
                ('date', models.DateTimeField(auto_now_add=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reports', to=settings.AUTH_USER_MODEL)),
                ('comment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comment_report', to='articles.Comment')),
                ('discussion', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='discussion_report', to='articles.Discussion')),
                ('reported', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reported', to=settings.AUTH_USER_MODEL)),
                ('review', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='review_report', to='articles.Review')),
            ],
        ),
    ]
nilq/baby-python
python
# coding=utf-8 from sii.resource import SII, SIIDeregister from sii.models.invoices_record import CRE_FACTURAS_EMITIDAS from sii.utils import unidecode_str, VAT from expects import * from datetime import datetime from spec.testing_data import DataGenerator, Tax, InvoiceLine, InvoiceTax from mamba import * import os def group_by_tax_rate(iva_values, in_invoice): aux_iva_values = {} if in_invoice: cuota_key = 'CuotaSoportada' else: cuota_key = 'CuotaRepercutida' for iva in iva_values: tipo_impositivo = iva.get('TipoImpositivo', 0.0) base_imponible = iva['BaseImponible'] cuota = iva.get(cuota_key, 0.0) if tipo_impositivo in aux_iva_values: aux = aux_iva_values[tipo_impositivo] aux['BaseImponible'] += base_imponible if aux.get(cuota_key, False): aux[cuota_key] += cuota else: aux_iva_values[tipo_impositivo] = iva.copy() return aux_iva_values with description('El XML Generado'): with before.all: self.data_gen = DataGenerator() with description('en la cabecera'): with before.all: # Example invoice to check common fields self.invoice = self.data_gen.get_out_invoice() self.invoice_obj = SII(self.invoice).generate_object() self.cabecera = ( self.invoice_obj['SuministroLRFacturasEmitidas']['Cabecera'] ) with it('la versión es la "1.1"'): expect(self.cabecera['IDVersionSii']).to(equal('1.1')) with context('cuando es de tipo alta'): with it('el tipo de comunicación debe ser "A0"'): expect(self.cabecera['TipoComunicacion']).to(equal('A0')) with context('cuando es de tipo modificación'): with before.all: new_data_gen = DataGenerator(invoice_registered=True) invoice = new_data_gen.get_out_invoice() invoice_obj = SII(invoice).generate_object() self.cabecera = ( invoice_obj['SuministroLRFacturasEmitidas']['Cabecera'] ) with it('el tipo de comunicación debe ser "A1"'): expect(self.cabecera['TipoComunicacion']).to(equal('A1')) with context('en el titular'): with it('el nif deben ser los del titular'): expect( self.cabecera['Titular']['NIF'] ).to(equal( VAT.clean_vat(self.invoice.company_id.partner_id.vat) )) with it('el nombre y apellidos deben ser los del titular'): expect( self.cabecera['Titular']['NombreRazon'] ).to(equal( unidecode_str(self.invoice.company_id.partner_id.name)) ) with description('en los datos comunes de una factura'): with before.all: # Example invoice to check common fields self.invoice = self.data_gen.get_out_invoice() self.invoice_obj = SII(self.invoice).generate_object() self.factura = ( self.invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas'] ) with context('en los NIFs involucrados'): with before.all: os.environ['NIF_TITULAR'] = 'ES12345678T' os.environ['NIF_CONTRAPARTE'] = 'esES654321P' new_data_gen = DataGenerator() nifs_test_invoice = new_data_gen.get_out_invoice() self.nif_contraparte = nifs_test_invoice.partner_id.vat[2:] self.nif_titular = ( nifs_test_invoice.company_id.partner_id.vat[2:] ) self.nifs_test_obj = SII(nifs_test_invoice).generate_object() with it('el NIF del Titular no debe empezar por "ES"'): expect( self.nifs_test_obj['SuministroLRFacturasEmitidas'] ['Cabecera']['Titular']['NIF'] ).to(equal(self.nif_titular)) with it('el NIF de la Contraparte no debe empezar por "ES"'): expect( self.nifs_test_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas']['FacturaExpedida'] ['Contraparte']['NIF'] ).to(equal(self.nif_contraparte)) with it('la ClaveRegimenEspecialOTrascendencia debe ser válido'): expect( dict(CRE_FACTURAS_EMITIDAS).keys() ).to(contain( (self.factura['FacturaExpedida'] ['ClaveRegimenEspecialOTrascendencia']) )) with it('la 
descripción de la operación debe ser el de la factura'): expect( self.factura['FacturaExpedida']['DescripcionOperacion'] ).to(equal(self.invoice.sii_description)) with it('el número de la factura debe ser el de la factura original'): expect( self.factura['IDFactura']['NumSerieFacturaEmisor'] ).to(equal(self.invoice.number)) with it('el tipo de la factura es "F1"'): expect( self.factura['FacturaExpedida']['TipoFactura'] ).to(equal('F1')) with context('en los datos del período'): with before.all: self.periodo = self.factura['PeriodoLiquidacion'] with it('el ejercicio es el correspondiente al año de la factura'): expect( self.periodo['Ejercicio'] ).to(equal(self.invoice.period_id.name[3:7])) with it('el período es el correspondiente al mes de la factura'): expect( self.periodo['Periodo'] ).to(equal(self.invoice.period_id.name[0:2])) with description('en los datos de una factura emitida'): with before.all: self.out_invoice = self.data_gen.get_out_invoice() self.out_invoice_obj = SII(self.out_invoice).generate_object() self.factura_emitida = ( self.out_invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas'] ) with context('en una contraparte con NIF no registrado en la AEAT'): with before.all: new_data_gen = DataGenerator(contraparte_registered=False) self.out_invoice = new_data_gen.get_out_invoice() self.nif_contraparte = self.out_invoice.partner_id.vat[2:] out_invoice_obj = SII(self.out_invoice).generate_object() self.contraparte = ( out_invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas']['FacturaExpedida'] ['Contraparte'] ) with it('el ID debe ser el NIF de la contraparte'): expect( self.contraparte['IDOtro']['ID'] ).to(equal(self.nif_contraparte)) with it('el IDType debe ser "07"'): expect(self.contraparte['IDOtro']['IDType']).to(equal('07')) with it('el CodigoPais debe ser "ES"'): expect(self.contraparte['IDOtro']['CodigoPais']).to(equal('ES')) with context('en los detalles del IVA'): with before.all: detalle_iva = ( self.factura_emitida['FacturaExpedida']['TipoDesglose'] ['DesgloseFactura']['Sujeta']['NoExenta']['DesgloseIVA'] ['DetalleIVA'] ) self.grouped_detalle_iva = group_by_tax_rate( detalle_iva, in_invoice=False ) with it('la BaseImponible debe ser la original'): expect( self.grouped_detalle_iva[21.0]['BaseImponible'] ).to(equal( self.out_invoice.tax_line[0].base )) with it('la CuotaRepercutida debe ser la original'): expect( self.grouped_detalle_iva[21.0]['CuotaRepercutida'] ).to(equal( self.out_invoice.tax_line[0].tax_amount )) with it('el TipoImpositivo debe ser la original'): expect( self.grouped_detalle_iva[21.0]['TipoImpositivo'] ).to(equal( self.out_invoice.tax_line[0].tax_id.amount * 100 )) with context('en los detalles del IVA inversion sujeto pasivo'): with before.all: name_iva_isp = 'IVA 21% Inv. 
Sujeto pasivo' tax_iva_isp = Tax(name=name_iva_isp, amount=0, type='percent') self.out_invoice.invoice_line.append(InvoiceLine( price_subtotal=3200, invoice_line_tax_id=[tax_iva_isp] )) base_iva_isp = sum( [line.price_subtotal for line in self.out_invoice.invoice_line if tax_iva_isp in line.invoice_line_tax_id] ) invoice_tax_isp = InvoiceTax( name=name_iva_isp, base=base_iva_isp, tax_amount=base_iva_isp * tax_iva_isp.amount, tax_id=tax_iva_isp ) self.out_invoice.tax_line.append(invoice_tax_isp) self.out_invoice_obj = SII(self.out_invoice).generate_object() self.factura_emitida = ( self.out_invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas'] ) detalle_iva_isp = ( self.factura_emitida['FacturaExpedida']['TipoDesglose'] ['DesgloseTipoOperacion']['Entrega']['Sujeta']['NoExenta'] ['DesgloseIVA']['DetalleIVA'] ) self.grouped_detalle_iva_isp = group_by_tax_rate( detalle_iva_isp, in_invoice=False ) with it('la BaseImponible debe ser la original'): expect( self.grouped_detalle_iva_isp[0.0]['BaseImponible'] ).to(equal( self.out_invoice.tax_line[4].base )) with it('la CuotaRepercutida debe ser la original'): expect( self.grouped_detalle_iva_isp[0.0]['CuotaRepercutida'] ).to(equal( self.out_invoice.tax_line[4].tax_amount )) with it('el TipoImpositivo debe ser la original'): expect( self.grouped_detalle_iva_isp[0.0]['TipoImpositivo'] ).to(equal( self.out_invoice.tax_line[4].tax_id.amount * 100 )) with context('si es una exportación'): with before.all: # Clave Régimen Especial exportación: '02' self.cre_exportacion = '02' self.out_invoice.sii_out_clave_regimen_especial = ( self.cre_exportacion ) self.export_inv_obj = SII(self.out_invoice).generate_object() self.factura_emitida = ( self.export_inv_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas'] ) with context('en los detalles del IVA'): with before.all: detalle_iva = ( self.factura_emitida['FacturaExpedida']['TipoDesglose'] ['DesgloseTipoOperacion']['Entrega']['Sujeta'] ['NoExenta']['DesgloseIVA']['DetalleIVA'] ) self.grouped_detalle_iva = group_by_tax_rate( detalle_iva, in_invoice=False ) with it('la BaseImponible debe ser la original'): expect( self.grouped_detalle_iva[21.0]['BaseImponible'] ).to(equal( self.out_invoice.tax_line[0].base )) with it('la CuotaRepercutida debe ser la original'): expect( self.grouped_detalle_iva[21.0]['CuotaRepercutida'] ).to(equal( self.out_invoice.tax_line[0].tax_amount )) with it('el TipoImpositivo debe ser la original'): expect( self.grouped_detalle_iva[21.0]['TipoImpositivo'] ).to(equal( self.out_invoice.tax_line[0].tax_id.amount * 100 )) with context('si es una operación de alquiler (CRE "12" o "13")'): with before.all: new_data_gen = DataGenerator() self.out_invoice = new_data_gen.get_out_invoice() self.out_invoice.sii_out_clave_regimen_especial = '12' provincia = ( self.out_invoice.address_contact_id.state_id ) self.comunidad_autonoma = provincia.comunitat_autonoma with context('si el inmueble pertenece a España'): with it('si tiene referencia catastral'): ref_catastral = '9872023 VH5797S 0001 WX' self.out_invoice.address_contact_id.ref_catastral = \ ref_catastral out_invoice_obj = SII(self.out_invoice).generate_object() factura_expedida = ( out_invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas']['FacturaExpedida'] ) detalle_inmueble = ( factura_expedida['DatosInmueble']['DetalleInmueble'] ) expect( dict(CRE_FACTURAS_EMITIDAS).keys() ).to(contain( (factura_expedida[ 'ClaveRegimenEspecialOTrascendencia' ]) )) 
expect(detalle_inmueble['ReferenciaCatastral']).to(equal( ref_catastral )) with context('si no tiene referencia catastral'): with it('no debe tener referencia catastral'): ref_catastral = '9872023 VH5797S 0001 WX' self.out_invoice.address_contact_id.ref_catastral = \ False out_invoice_obj = \ SII(self.out_invoice).generate_object() factura_expedida = ( out_invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas']['FacturaExpedida'] ) detalle_inmueble = ( factura_expedida['DatosInmueble']['DetalleInmueble'] ) expect( dict(CRE_FACTURAS_EMITIDAS).keys() ).to(contain( (factura_expedida[ 'ClaveRegimenEspecialOTrascendencia' ]) )) expect(detalle_inmueble.keys()).not_to( contain('ReferenciaCatastral') ) with it('si no es de Navarra ni País Basco la situación ' 'inmueble debe ser "1"'): self.comunidad_autonoma.codi = '01' out_invoice_obj = SII(self.out_invoice).generate_object() detalle_inmueble = ( out_invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas']['FacturaExpedida'] ['DatosInmueble']['DetalleInmueble'] ) expect(detalle_inmueble['SituacionInmueble']).to(equal('1')) with it('si es de Navarra la situación inmueble debe ser "2"'): self.comunidad_autonoma.codi = '15' out_invoice_obj = SII(self.out_invoice).generate_object() detalle_inmueble = ( out_invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas']['FacturaExpedida'] ['DatosInmueble']['DetalleInmueble'] ) expect(detalle_inmueble['SituacionInmueble']).to(equal('2')) with it('si es de País Basco la situación inmueble ' 'debe ser "2"'): self.comunidad_autonoma.codi = '16' out_invoice_obj = SII(self.out_invoice).generate_object() detalle_inmueble = ( out_invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas']['FacturaExpedida'] ['DatosInmueble']['DetalleInmueble'] ) expect(detalle_inmueble['SituacionInmueble']).to(equal('2')) with context('si el inmueble no pertenece a España'): with it('la situación inmueble debe ser "4"'): self.comunidad_autonoma.codi = '20' out_invoice_obj = SII(self.out_invoice).generate_object() detalle_inmueble = ( out_invoice_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas']['FacturaExpedida'] ['DatosInmueble']['DetalleInmueble'] ) expect(detalle_inmueble['SituacionInmueble']).to(equal('4')) with description('en los datos de una factura recibida'): with before.all: self.in_invoice = self.data_gen.get_in_invoice() self.in_invoice_obj = SII(self.in_invoice).generate_object() self.factura_recibida = ( self.in_invoice_obj['SuministroLRFacturasRecibidas'] ['RegistroLRFacturasRecibidas'] ) with context('en los datos del emisor de la factura'): with context('si no está registrado en la AEAT'): with before.all: new_data_gen = DataGenerator(contraparte_registered=False) self.in_invoice = new_data_gen.get_in_invoice() # Valid French TVA FR23334175221 self.in_invoice.partner_id.country_id.code = 'FR' self.in_invoice.partner_id.vat = 'FR23334175221' in_invoice_obj = SII(self.in_invoice).generate_object() self.emisor_factura = ( in_invoice_obj['SuministroLRFacturasRecibidas'] ['RegistroLRFacturasRecibidas']['IDFactura'] ['IDEmisorFactura'] ) with it('el ID debe ser el NIF del emisor'): nif_emisor = self.in_invoice.partner_id.vat[2:] expect( self.emisor_factura['IDOtro']['ID'] ).to(equal(nif_emisor)) with it('el IDType debe ser "04"'): expect( self.emisor_factura['IDOtro']['IDType'] ).to(equal('04')) with it('el CodigoPais debe ser "FR"'): expect( self.emisor_factura['IDOtro']['CodigoPais'] ).to(equal('FR')) with context('en los detalles del 
IVA'): with before.all: detalle_iva_desglose_iva = ( self.factura_recibida['FacturaRecibida']['DesgloseFactura'] ['DesgloseIVA']['DetalleIVA'] ) self.grouped_detalle_iva = group_by_tax_rate( detalle_iva_desglose_iva, in_invoice=True ) with it('el detalle de DesgloseIVA debe ser la original'): expect( self.grouped_detalle_iva[21.0]['BaseImponible'] ).to(equal( self.in_invoice.tax_line[0].base )) expect( self.grouped_detalle_iva[21.0]['CuotaSoportada'] ).to(equal( self.in_invoice.tax_line[0].tax_amount )) expect( self.grouped_detalle_iva[21.0]['TipoImpositivo'] ).to(equal( self.in_invoice.tax_line[0].tax_id.amount * 100 )) with _it('el detalle de DesgloseIVA para importe no sujeto a ' 'impuesto debe ser correcto'): expect( self.grouped_detalle_iva[0.0]['BaseImponible'] ).to(equal( self.in_invoice.invoice_line[5].price_subtotal )) expect( self.grouped_detalle_iva[0.0]['CuotaSoportada'] ).to(equal(0)) expect( self.grouped_detalle_iva[0.0]['TipoImpositivo'] ).to(equal(0)) with context('si es una importación'): with before.all: # Clave Régimen Especial importación: '13' self.cre_importacion = '13' self.in_invoice.sii_in_clave_regimen_especial = ( self.cre_importacion ) self.import_inv_obj = SII(self.in_invoice).generate_object() self.factura_recibida = ( self.import_inv_obj['SuministroLRFacturasRecibidas'] ['RegistroLRFacturasRecibidas'] ) with context('en los detalles del IVA'): with it('el detalle de DesgloseIVA debe ser la original'): # TODO change TipoImpositivo and CuotaSoportada should be '0' detalle_iva_desglose_iva = ( self.factura_recibida['FacturaRecibida'] ['DesgloseFactura']['DesgloseIVA']['DetalleIVA'] ) self.grouped_detalle_iva = group_by_tax_rate( detalle_iva_desglose_iva, in_invoice=True ) expect( self.grouped_detalle_iva[21.0]['BaseImponible'] ).to(equal( self.in_invoice.tax_line[0].base )) expect( self.grouped_detalle_iva[21.0]['CuotaSoportada'] ).to(equal( self.in_invoice.tax_line[0].tax_amount )) expect( self.grouped_detalle_iva[21.0]['TipoImpositivo'] ).to(equal( self.in_invoice.tax_line[0].tax_id.amount * 100 )) with context('si es una factura del primer semestre 2017'): with before.all: # Clave Régimen Especial para # Facturas Recibidas Primer Semestre 2017: '14' self.cre_primer_semestre = '14' self.in_invoice.sii_in_clave_regimen_especial = ( self.cre_primer_semestre ) self.first_semester_in_inv_obj = ( SII(self.in_invoice).generate_object() ) self.factura_recibida = ( self.first_semester_in_inv_obj ['SuministroLRFacturasRecibidas'] ['RegistroLRFacturasRecibidas'] ) with it('debe tener Clave de Régimen Especial "14"'): expect( self.factura_recibida['FacturaRecibida'] ['ClaveRegimenEspecialOTrascendencia'] ).to(equal(self.cre_primer_semestre)) with it('la cuota deducible debe ser 0'): expect( self.factura_recibida['FacturaRecibida']['CuotaDeducible'] ).to(equal(0)) with it('la fecha de registro contable debe ser la fecha del ' 'envío'): expect( self.factura_recibida['FacturaRecibida'] ['FechaRegContable'] ).to(equal(datetime.today().strftime('%d-%m-%Y'))) with description('en los datos de una factura rectificativa emitida'): with before.all: self.out_refund = self.data_gen.get_out_refund_invoice() self.out_refund_obj = SII(self.out_refund).generate_object() self.fact_rect_emit = ( self.out_refund_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas'] ) with context('en los datos de rectificación'): with it('el TipoRectificativa debe ser por sustitución (S)'): expect( self.fact_rect_emit['FacturaExpedida']['TipoRectificativa'] ).to(equal('S')) with before.all: 
self.importe_rectificacion = ( self.fact_rect_emit['FacturaExpedida'] ['ImporteRectificacion'] ) with it('la BaseRectificada debe ser 0'): expect( self.importe_rectificacion['BaseRectificada'] ).to(equal(0)) with it('la CuotaRectificada debe ser 0'): expect( self.importe_rectificacion['CuotaRectificada'] ).to(equal(0)) with context('en los detalles del IVA'): with before.all: detalle_iva = ( self.fact_rect_emit['FacturaExpedida']['TipoDesglose'] ['DesgloseFactura']['Sujeta']['NoExenta']['DesgloseIVA'] ['DetalleIVA'] ) self.grouped_detalle_iva = group_by_tax_rate( detalle_iva, in_invoice=False ) with it('la BaseImponible debe ser la original'): expect( self.grouped_detalle_iva[21.0]['BaseImponible'] ).to(equal( -1 * abs(self.out_refund.tax_line[0].base) )) with it('la CuotaRepercutida debe ser la original'): expect( self.grouped_detalle_iva[21.0]['CuotaRepercutida'] ).to(equal( -1 * abs(self.out_refund.tax_line[0].tax_amount) )) with it('el TipoImpositivo debe ser la original'): expect( self.grouped_detalle_iva[21.0]['TipoImpositivo'] ).to(equal( self.out_refund.tax_line[0].tax_id.amount * 100 )) with description('en los datos de una factura rectificativa recibida'): with before.all: self.in_refund = self.data_gen.get_in_refund_invoice() self.in_refund_obj = SII(self.in_refund).generate_object() self.fact_rect_recib = ( self.in_refund_obj['SuministroLRFacturasRecibidas'] ['RegistroLRFacturasRecibidas'] ) with context('en los datos de rectificación'): with it('el TipoRectificativa debe ser por sustitución (S)'): expect( self.fact_rect_recib['FacturaRecibida']['TipoRectificativa'] ).to(equal('S')) with before.all: self.importe_rectificacion = ( self.fact_rect_recib['FacturaRecibida'] ['ImporteRectificacion'] ) with it('la BaseRectificada debe ser 0'): expect( self.importe_rectificacion['BaseRectificada'] ).to(equal(0)) with it('la CuotaRectificada debe ser 0'): expect( self.importe_rectificacion['CuotaRectificada'] ).to(equal(0)) with context('en los detalles del IVA'): with before.all: detalle_iva = ( self.fact_rect_recib['FacturaRecibida']['DesgloseFactura'] ['DesgloseIVA']['DetalleIVA'] ) self.grouped_detalle_iva = group_by_tax_rate( detalle_iva, in_invoice=True ) with it('la BaseImponible debe ser la original'): expect( self.grouped_detalle_iva[21.0]['BaseImponible'] ).to(equal( -1 * abs(self.in_refund.tax_line[0].base) )) with it('la CuotaRepercutida debe ser la original'): expect( self.grouped_detalle_iva[21.0]['CuotaSoportada'] ).to(equal( -1 * abs(self.in_refund.tax_line[0].tax_amount) )) with it('el TipoImpositivo debe ser la original'): expect( self.grouped_detalle_iva[21.0]['TipoImpositivo'] ).to(equal( self.in_refund.tax_line[0].tax_id.amount * 100 )) with description('en los datos de una factura emitida rectificativa ' 'sin anuladora RA'): with before.all: self.out_invoice_RA = self.data_gen.get_out_invoice_RA() self.out_invoice_RA.rectifying_id.sii_registered = True self.out_invoice_RA_obj = SII(self.out_invoice_RA).generate_object() self.fact_RA_emitida = ( self.out_invoice_RA_obj['SuministroLRFacturasEmitidas'] ['RegistroLRFacturasEmitidas'] ) with context('en los datos de rectificación'): with it('el TipoRectificativa debe ser por sustitución (S)'): expect( self.fact_RA_emitida['FacturaExpedida']['TipoRectificativa'] ).to(equal('S')) with it('debe contener las FacturasRectificadas'): expect( self.fact_RA_emitida['FacturaExpedida'] ['FacturasRectificadas']['IDFacturaRectificada'][0] ['NumSerieFacturaEmisor'] ).to(equal( self.out_invoice_RA.rectifying_id.number )) 
fecha_expedicion = ( self.fact_RA_emitida['FacturaExpedida'] ['FacturasRectificadas']['IDFacturaRectificada'][0] ['FechaExpedicionFacturaEmisor'] ) expect( datetime.strptime( fecha_expedicion, '%d-%m-%Y' ).strftime('%Y-%m-%d') ).to(equal( self.out_invoice_RA.rectifying_id.date_invoice )) with it('debe contener el ImporteRectificacion'): expect( self.fact_RA_emitida['FacturaExpedida'] ['ImporteRectificacion']['BaseRectificada'] ).to(equal( self.out_invoice_RA.rectifying_id.amount_untaxed )) expect( self.fact_RA_emitida['FacturaExpedida'] ['ImporteRectificacion']['CuotaRectificada'] ).to(equal( self.out_invoice_RA.rectifying_id.amount_tax )) with description('El XML Generado en una baja de una factura emitida'): with before.all: self.data_gen = DataGenerator() with description('en la cabecera'): with before.all: self.invoice = self.data_gen.get_out_invoice() self.invoice_obj = ( SIIDeregister(self.invoice).generate_deregister_object() ) self.cabecera = ( self.invoice_obj['BajaLRFacturasEmitidas']['Cabecera'] ) with it('la versión es la "1.1"'): expect(self.cabecera['IDVersionSii']).to(equal('1.1')) with it('no debe contener el campo "TipoComunicacion"'): expect(self.cabecera).not_to(have_key('TipoComunicacion')) with context('en el titular'): with it('el nif deben ser los del titular'): expect( self.cabecera['Titular']['NIF'] ).to(equal( VAT.clean_vat(self.invoice.company_id.partner_id.vat) )) with it('el nombre y apellidos deben ser los del titular'): expect( self.cabecera['Titular']['NombreRazon'] ).to(equal( unidecode_str(self.invoice.company_id.partner_id.name)) ) with description('en la baja de una factura'): with before.all: self.invoice = self.data_gen.get_out_invoice() self.invoice_obj = ( SIIDeregister(self.invoice).generate_deregister_object() ) self.factura_emitida = ( self.invoice_obj['BajaLRFacturasEmitidas'] ['RegistroLRBajaExpedidas'] ) with context('en los datos del período'): with before.all: self.periodo = self.factura_emitida['PeriodoLiquidacion'] with it('el ejercicio es el correspondiente al año de la factura'): expect( self.periodo['Ejercicio'] ).to(equal(self.invoice.period_id.name[3:7])) with it('el período es el correspondiente al mes de la factura'): expect( self.periodo['Periodo'] ).to(equal(self.invoice.period_id.name[0:2])) with context('en los datos de la factura'): with before.all: self.factura = self.factura_emitida['IDFactura'] with it('el NIF del emisor de la factura es correcto'): expect( self.factura['IDEmisorFactura']['NIF'] ).to(equal( VAT.clean_vat(self.invoice.company_id.partner_id.vat) )) with it('el número de factura es correcto'): expect( self.factura['NumSerieFacturaEmisor'] ).to(equal( self.invoice.number )) with it('la fecha de factura es correcto'): expect( datetime.strptime( self.factura['FechaExpedicionFacturaEmisor'], '%d-%m-%Y' ).strftime('%Y-%m-%d') ).to(equal( self.invoice.date_invoice )) with description('El XML Generado en una baja de una factura recibida'): with before.all: self.data_gen = DataGenerator() with description('en la cabecera'): with before.all: self.invoice = self.data_gen.get_in_invoice() self.invoice_obj = ( SIIDeregister(self.invoice).generate_deregister_object() ) self.cabecera = ( self.invoice_obj['BajaLRFacturasRecibidas']['Cabecera'] ) with it('la versión es la "1.1"'): expect(self.cabecera['IDVersionSii']).to(equal('1.1')) with it('no debe contener el campo "TipoComunicacion"'): expect(self.cabecera).not_to(have_key('TipoComunicacion')) with context('en el titular'): with it('el nif deben ser los del titular'): 
expect( self.cabecera['Titular']['NIF'] ).to(equal( VAT.clean_vat(self.invoice.company_id.partner_id.vat) )) with it('el nombre y apellidos deben ser los del titular'): expect( self.cabecera['Titular']['NombreRazon'] ).to(equal( unidecode_str(self.invoice.company_id.partner_id.name)) ) with description('en la baja de una factura'): with before.all: self.invoice = self.data_gen.get_in_invoice() self.invoice_obj = ( SIIDeregister(self.invoice).generate_deregister_object() ) self.factura_recibida = ( self.invoice_obj['BajaLRFacturasRecibidas'] ['RegistroLRBajaRecibidas'] ) with context('en los datos del período'): with before.all: self.periodo = self.factura_recibida['PeriodoLiquidacion'] with it('el ejercicio es el correspondiente al año de la factura'): expect( self.periodo['Ejercicio'] ).to(equal(self.invoice.period_id.name[3:7])) with it('el período es el correspondiente al mes de la factura'): expect( self.periodo['Periodo'] ).to(equal(self.invoice.period_id.name[0:2])) with context('en los datos de la factura'): with before.all: self.factura = self.factura_recibida['IDFactura'] with it('el nombre del emisor de la factura es correcto'): expect( self.factura['IDEmisorFactura']['NombreRazon'] ).to(equal( unidecode_str(self.invoice.partner_id.name) )) with it('el NIF del emisor de la factura es correcto'): expect( self.factura['IDEmisorFactura']['NIF'] ).to(equal( VAT.clean_vat(self.invoice.partner_id.vat) )) with it('el número de factura es correcto'): expect( self.factura['NumSerieFacturaEmisor'] ).to(equal( self.invoice.origin )) with it('la fecha de factura es correcto'): expect( datetime.strptime( self.factura['FechaExpedicionFacturaEmisor'], '%d-%m-%Y' ).strftime('%Y-%m-%d') ).to(equal( self.invoice.origin_date_invoice ))
nilq/baby-python
python
from typing import Union

import spacy

regex = [r"\bsofa\b"]

method_regex = (
    r"sofa.*?((?P<max>max\w*)|(?P<vqheures>24h\w*)|"
    r"(?P<admission>admission\w*))(?P<after_value>(.|\n)*)"
)

value_regex = r".*?.[\n\W]*?(\d+)[^h\d]"

score_normalization_str = "score_normalization.sofa"


@spacy.registry.misc(score_normalization_str)
def score_normalization(extracted_score: Union[str, None]):
    """
    Sofa score normalization.
    If available, returns the integer value of the SOFA score.
    """
    score_range = list(range(0, 30))
    if (extracted_score is not None) and (int(extracted_score) in score_range):
        return int(extracted_score)
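# --- sanity check (not part of the original module) -------------------------
# Hedged examples of the normalization above; the inputs mimic what the
# regex capture group would yield.
if __name__ == "__main__":
    assert score_normalization("7") == 7
    assert score_normalization("29") == 29
    assert score_normalization("45") is None  # outside the 0-29 range
    assert score_normalization(None) is None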
nilq/baby-python
python
import cloudmesh

user = cloudmesh.load()
print(user.cloudnames())
nilq/baby-python
python
import sqlite3

con = sqlite3.connect(":memory:")
# sqlite3.Row lets rows be addressed by position and by column name.
con.row_factory = sqlite3.Row

cur = con.cursor()
cur.execute("select 'John' as name, 42 as age")
for row in cur:
    # Name lookups are case-insensitive.
    assert row[0] == row["name"]
    assert row["name"] == row["nAmE"]
    assert row[1] == row["age"]
    assert row[1] == row["AgE"]

con.close()
nilq/baby-python
python