hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c1dca0b89e8bb1dc105dcc9d3b29afa3ffcd2047 | 221 | py | Python | mushroom_rl/features/tensors/constant_tensor.py | k4ntz/mushroom-rl | 17c8e9b2a9648a59169f3599c4ef8d259afc39f4 | [
"MIT"
] | 1 | 2020-11-06T18:32:32.000Z | 2020-11-06T18:32:32.000Z | mushroom_rl/features/tensors/constant_tensor.py | AmmarFahmy/mushroom-rl | 2625ee7f64d5613b3b9fba00f0b7a39fece88ca5 | [
"MIT"
] | null | null | null | mushroom_rl/features/tensors/constant_tensor.py | AmmarFahmy/mushroom-rl | 2625ee7f64d5613b3b9fba00f0b7a39fece88ca5 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class ConstantTensor(nn.Module):
    """
    Pytorch module to implement a constant function (always one).
    """
    def forward(self, x):
        """Return a column of ones, one row per input sample.

        Args:
            x (torch.Tensor): batch of inputs; only ``x.shape[0]`` and
                ``x.device`` are used.

        Returns:
            torch.Tensor: a ``(x.shape[0], 1)`` tensor of ones allocated on
            the same device as ``x``. (Previously the ones were always
            created on the default device, which mismatched GPU inputs.)
        """
        return torch.ones(x.shape[0], 1, device=x.device)
| 17 | 65 | 0.647059 |
cde951d77cec4f2a944b9addb748fe561a9c1a1d | 14,285 | py | Python | userbot/plugins/gDrive.py | shadowninja024/Shadowninja_userbot | 0e73de64d8105bdc179fa467c5730f9a9f58452f | [
"MIT"
] | 23 | 2020-06-20T09:02:59.000Z | 2020-11-29T12:01:37.000Z | userbot/plugins/gDrive.py | madhav2726/JaaduBot | 3716d329d5e669ee59a154e170a8f907d38aa6db | [
"MIT"
] | null | null | null | userbot/plugins/gDrive.py | madhav2726/JaaduBot | 3716d329d5e669ee59a154e170a8f907d38aa6db | [
"MIT"
] | 128 | 2020-06-20T09:03:21.000Z | 2021-11-16T07:15:40.000Z | # The entire code given below is verbatim copied from
# https://github.com/cyberboysumanjay/Gdrivedownloader/blob/master/gdrive_upload.py
# there might be some changes made to suit the needs for this repository
# Licensed under MIT License
import asyncio
import os
import time
import math
from datetime import datetime
from telethon import events
from uniborg.util import admin_cmd, progress
#
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from apiclient.errors import ResumableUploadError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client import file, client, tools
from mimetypes import guess_type
import httplib2
# Path to token json file, it should be in same directory as script
G_DRIVE_TOKEN_FILE = Var.TEMP_DOWNLOAD_DIRECTORY + "/auth_token.txt"
# Copy your credentials from the APIs Console
CLIENT_ID = Var.G_DRIVE_CLIENT_ID
CLIENT_SECRET = Var.G_DRIVE_CLIENT_SECRET
# Check https://developers.google.com/drive/scopes for all available scopes
OAUTH_SCOPE = "https://www.googleapis.com/auth/drive.file"
# Redirect URI for installed apps, can be left as is
REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob"
parent_id = Var.GDRIVE_FOLDER_ID
G_DRIVE_DIR_MIME_TYPE = "application/vnd.google-apps.folder"
#@command(pattern="^.ugdrive ?(.*)")
@borg.on(admin_cmd(pattern=r"ugdrive ?(.*)"))
async def _(event):
    """Handle ``.ugdrive [path]``: upload a replied-to media file (or a local
    file path) to Google Drive and reply with the download link.

    Requires G_DRIVE_CLIENT_ID / G_DRIVE_CLIENT_SECRET to be configured.
    """
    if event.fwd_from:
        # Ignore forwarded copies of the command.
        return
    mone = await event.reply("Processing ...")
    if CLIENT_ID is None or CLIENT_SECRET is None:
        await mone.edit("This module requires credentials from https://da.gd/so63O. Aborting!")
        return False
    input_str = event.pattern_match.group(1)
    # Make sure the download directory exists before any download.
    if not os.path.isdir(Var.TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(Var.TEMP_DOWNLOAD_DIRECTORY)
    required_file_name = None
    start = datetime.now()
    if event.reply_to_msg_id and not input_str:
        # No path given: download the replied-to message's media locally first.
        reply_message = await event.get_reply_message()
        try:
            c_time = time.time()  # NOTE(review): c_time is never used below
            await mone.edit("Downloading to Local...")
            downloaded_file_name = await bot.download_media(
                reply_message,
                Var.TEMP_DOWNLOAD_DIRECTORY
            )
        except Exception as e:  # pylint:disable=C0103,W0703
            await mone.edit(str(e))
            return False
        else:
            end = datetime.now()
            ms = (end - start).seconds
            required_file_name = downloaded_file_name
            await mone.edit("Downloaded to `{}` in {} seconds.".format(downloaded_file_name, ms))
    elif input_str:
        # A path argument was given: upload an existing local file.
        input_str = input_str.strip()
        if os.path.exists(input_str):
            end = datetime.now()
            ms = (end - start).seconds
            required_file_name = input_str
            await mone.edit("Found `{}` in {} seconds.".format(input_str, ms))
        else:
            await mone.edit("File Not found in local server. Give me a file path :((")
            return False
    # logger.info(required_file_name)
    if required_file_name:
        # Check if token file exists, if not create it by requesting authorization code
        try:
            with open(G_DRIVE_TOKEN_FILE) as f:
                pass
        except IOError:
            # First run: walk the OAuth flow interactively, persist the token,
            # and post it to the private group so it can be set as a Var.
            storage = await create_token_file(G_DRIVE_TOKEN_FILE, event)
            http = authorize(G_DRIVE_TOKEN_FILE, storage)
            f = open(G_DRIVE_TOKEN_FILE, "r")
            token_file_data = f.read()
            await event.client.send_message(int(Var.PRIVATE_GROUP_ID), "Please add Var AUTH_TOKEN_DATA with the following as the value:\n\n`" + token_file_data + "`")
        # Authorize, get file parameters, upload file and print out result URL for download
        http = authorize(G_DRIVE_TOKEN_FILE, None)
        file_name, mime_type = file_ops(required_file_name)
        # required_file_name will have the full path
        # Sometimes API fails to retrieve starting URI, we wrap it.
        try:
            g_drive_link = await upload_file(http, required_file_name, file_name, mime_type,mone,parent_id)
            await mone.edit("__Successfully Uploaded File on G-Drive :__\n[{}]({})".format(file_name,g_drive_link))
        except Exception as e:
            await mone.edit(f"Exception occurred while uploading to gDrive {e}")
    else:
        await mone.edit("File Not found in local server. Give me a file path :((")
#@command(pattern="^.drivesch ?(.*)")
@borg.on(admin_cmd(pattern=r"drivesch ?(.*)"))
async def sch(event):
    """Handle ``.drivesch <text>``: search Google Drive by title and reply
    with a markdown list of matches.

    Bug fixed: the query built by the ``parent_id`` conditional used to be
    unconditionally overwritten by a second assignment, so when
    ``parent_id`` was None the search became ``'None' in parents`` and the
    fallback title-only query was dead code.
    """
    if event.fwd_from:
        return
    if CLIENT_ID is None or CLIENT_SECRET is None:
        await event.edit("This module requires credentials from https://da.gd/so63O. Aborting!")
        return False
    # Bootstrap the OAuth token file on first use (same flow as .ugdrive).
    try:
        with open(G_DRIVE_TOKEN_FILE) as f:
            pass
    except IOError:
        storage = await create_token_file(G_DRIVE_TOKEN_FILE, event)
        http = authorize(G_DRIVE_TOKEN_FILE, storage)
        f = open(G_DRIVE_TOKEN_FILE, "r")
        token_file_data = f.read()
        await event.client.send_message(int(Var.PRIVATE_GROUP_ID), "Please add Var AUTH_TOKEN_DATA with the following as the value:\n\n`" + token_file_data + "`")
    # Authorize, get file parameters, upload file and print out result URL for download
    http = authorize(G_DRIVE_TOKEN_FILE, None)
    input_str = event.pattern_match.group(1).strip()
    await event.edit("Searching for {} in G-Drive.".format(input_str))
    # Restrict the search to the configured folder only when one is set.
    if parent_id is not None:
        query = "'{}' in parents and (title contains '{}')".format(parent_id, input_str)
    else:
        query = "title contains '{}'".format(input_str)
    msg = await gsearch(http, query, input_str)
    await event.edit(str(msg))
async def gsearch(http, query, filename):
    """Run *query* against the Drive v2 files API, following pagination,
    and return a markdown-formatted result message.

    Folders are linked via ``/drive/folders/<id>``; plain files via a
    direct-download ``uc?id=...&export=download`` link.
    """
    service = build("drive", "v2", http=http)
    parts = ["**G-Drive Search Query**\n`" + filename + "`\n**Results**\n"]
    token = None
    while True:
        page = service.files().list(
            q=query,
            spaces='drive',
            fields='nextPageToken, items(id, title, mimeType)',
            pageToken=token,
        ).execute()
        for entry in page.get('items', []):
            title = entry.get('title')
            entry_id = entry.get('id')
            if entry.get('mimeType') == "application/vnd.google-apps.folder":
                parts.append("⁍ [{}](https://drive.google.com/drive/folders/{}) (folder)".format(title, entry_id) + "\n")
            else:
                parts.append("⁍ [{}](https://drive.google.com/uc?id={}&export=download)".format(title, entry_id) + "\n")
        token = page.get('nextPageToken', None)
        if token is None:
            break
    return "".join(parts)
#@command(pattern="^.gdrivedir ?(.*)")
@borg.on(admin_cmd(pattern=r"gdrivedir ?(.*)"))
async def _(event):
    """Handle ``.gdrivedir <path>``: recursively upload a local directory to
    Google Drive and reply with the created folder's link.

    Bug fixed: ``http`` was only assigned inside the "token file missing"
    branch, so every run with an existing token raised ``NameError`` at the
    ``create_directory`` call. The unconditional ``authorize`` call below
    matches what the .ugdrive and .drivesch handlers already do.
    """
    if event.fwd_from:
        return
    if CLIENT_ID is None or CLIENT_SECRET is None:
        await event.edit("This module requires credentials from https://da.gd/so63O. Aborting!")
        return
    if Var.PRIVATE_GROUP_ID is None:
        await event.edit("Please set the required environment variable `PRIVATE_GROUP_ID` for this plugin to work")
        return
    input_str = event.pattern_match.group(1)
    if os.path.isdir(input_str):
        # TODO: remove redundant code
        #
        # Seed the token file from the AUTH_TOKEN_DATA Var when provided.
        if Var.AUTH_TOKEN_DATA is not None:
            with open(G_DRIVE_TOKEN_FILE, "w") as t_file:
                t_file.write(Var.AUTH_TOKEN_DATA)
        # Check if token file exists, if not create it by requesting authorization code
        storage = None
        if not os.path.isfile(G_DRIVE_TOKEN_FILE):
            storage = await create_token_file(G_DRIVE_TOKEN_FILE, event)
            http = authorize(G_DRIVE_TOKEN_FILE, storage)
            f = open(G_DRIVE_TOKEN_FILE, "r")
            token_file_data = f.read()
            await event.client.send_message(int(Var.PRIVATE_GROUP_ID), "Please add Var AUTH_TOKEN_DATA with the following as the value:\n\n`" + token_file_data + "`")
        # Authorize unconditionally so http is always bound (fixes NameError
        # when the token file already existed).
        http = authorize(G_DRIVE_TOKEN_FILE, None)
        # first, create a sub-directory named after the uploaded directory
        await event.edit("Uploading `{}` to G-Drive...".format(input_str))
        dir_id = await create_directory(http, os.path.basename(os.path.abspath(input_str)), parent_id)
        await DoTeskWithDir(http, input_str, event, dir_id)
        dir_link = "https://drive.google.com/folderview?id={}".format(dir_id)
        await event.edit(f"__Successfully Uploaded Folder To G-Drive...__\n[{input_str}]({dir_link})")
    else:
        await event.edit(f"directory {input_str} does not seem to exist")
async def create_directory(http, directory_name, parent_id):
    """Create a Drive folder named *directory_name* (optionally under
    *parent_id*), make it readable by anyone with the link, and return the
    new folder's id.
    """
    drive_service = build("drive", "v2", http=http, cache_discovery=False)
    # "anyone with the link can read" permission, applied after creation.
    permissions = {
        "role": "reader",
        "type": "anyone",
        "value": None,
        "withLink": True
    }
    file_metadata = {
        "title": directory_name,
        "mimeType": G_DRIVE_DIR_MIME_TYPE
    }
    if parent_id is not None:
        # Without a parent the folder is created at the Drive root.
        file_metadata["parents"] = [{"id": parent_id}]
    file = drive_service.files().insert(body=file_metadata).execute()
    file_id = file.get("id")
    # Permissions must be inserted on the already-created folder id.
    drive_service.permissions().insert(fileId=file_id, body=permissions).execute()
    logger.info("Created Gdrive Folder:\nName: {}\nID: {} ".format(file.get("title"), file_id))
    return file_id
async def DoTeskWithDir(http, input_directory, event, parent_id):
    """Recursively mirror *input_directory* into the Drive folder *parent_id*:
    sub-directories become Drive folders, plain files are uploaded.

    Returns *parent_id* for an empty directory; otherwise returns whatever
    was produced by the LAST entry processed (presumably meant to be the
    top-level folder id — see the author's TODO below, the return value
    depends on directory listing order).
    """
    list_dirs = os.listdir(input_directory)
    if len(list_dirs) == 0:
        return parent_id
    r_p_id = None
    for a_c_f_name in list_dirs:
        current_file_name = os.path.join(input_directory, a_c_f_name)
        if os.path.isdir(current_file_name):
            # Create the matching Drive folder, then recurse into it.
            current_dir_id = await create_directory(http, a_c_f_name, parent_id)
            r_p_id = await DoTeskWithDir(http, current_file_name, event, current_dir_id)
        else:
            file_name, mime_type = file_ops(current_file_name)
            # current_file_name will have the full path
            g_drive_link = await upload_file(http, current_file_name, file_name, mime_type, event, parent_id)
            r_p_id = parent_id
    # TODO: there is a #bug here :( -- r_p_id reflects only the last loop
    # iteration, so the value returned to the caller is order-dependent.
    return r_p_id
# Get mime type and name of given file
def file_ops(file_path):
    """Return ``(file_name, mime_type)`` for *file_path*.

    The MIME type is guessed from the extension and falls back to
    "text/plain" when unknown. Uses ``os.path.basename`` instead of
    ``split("/")`` so Windows-style paths also yield just the file name.
    """
    mime_type = guess_type(file_path)[0]
    mime_type = mime_type if mime_type else "text/plain"
    file_name = os.path.basename(file_path)
    return file_name, mime_type
async def create_token_file(token_file, event):
    """Interactively run the OAuth2 installed-app flow and persist the
    resulting credentials to *token_file*.

    The authorize URL is posted to the private group; the next outgoing
    message in that group is read back as the authorization code.
    Returns the ``oauth2client`` Storage wrapping the saved credentials.
    """
    # Run through the OAuth flow and retrieve credentials
    flow = OAuth2WebServerFlow(
        CLIENT_ID,
        CLIENT_SECRET,
        OAUTH_SCOPE,
        redirect_uri=REDIRECT_URI
    )
    authorize_url = flow.step1_get_authorize_url()
    async with bot.conversation(int(Var.PRIVATE_GROUP_ID)) as conv:
        await conv.send_message(f"Go to the following link in your browser: {authorize_url} and reply the code")
        # Wait for the user's own (outgoing) reply containing the code.
        response = conv.wait_event(events.NewMessage(
            outgoing=True,
            chats=int(Var.PRIVATE_GROUP_ID)
        ))
        response = await response
        code = response.message.message.strip()
        # Exchange the code for credentials and persist them.
        credentials = flow.step2_exchange(code)
        storage = Storage(token_file)
        storage.put(credentials)
        return storage
def authorize(token_file, storage):
    """Return an ``httplib2.Http`` authorized with the credentials stored in
    *token_file*.

    When *storage* is None a Storage is opened from *token_file*; the
    credentials are refreshed before being bound to the transport.
    """
    if storage is None:
        storage = Storage(token_file)
    credentials = storage.get()
    transport = httplib2.Http()
    credentials.refresh(transport)
    return credentials.authorize(transport)
async def upload_file(http, file_path, file_name, mime_type, event, parent_id):
    """Resumably upload *file_path* to Google Drive, editing *event* with a
    progress bar as chunks complete. Returns the file's webContentLink.
    """
    # Create Google Drive service instance
    drive_service = build("drive", "v2", http=http, cache_discovery=False)
    # File body description; resumable=True enables chunked uploads.
    media_body = MediaFileUpload(file_path, mimetype=mime_type, resumable=True)
    body = {
        "title": file_name,
        "description": "Uploaded using Userbot gDrive v1",
        "mimeType": mime_type,
    }
    if parent_id is not None:
        body["parents"] = [{"id": parent_id}]
    # Permissions body description: anyone who has link can read.
    # Other permissions can be found at https://developers.google.com/drive/v2/reference/permissions
    permissions = {
        "role": "reader",
        "type": "anyone",
        "value": None,
        "withLink": True
    }
    # Insert a file; next_chunk() below drives the actual transfer.
    file = drive_service.files().insert(body=body, media_body=media_body)
    response = None
    display_message = ""
    # response stays None until the final chunk completes.
    while response is None:
        status, response = file.next_chunk()  # Credits: https://github.com/AvinashReddy3108/PaperplaneExtended/commit/df65da55d16a6563aa9023cac2bedf43248379f5
        await asyncio.sleep(1)
        if status:
            percentage = int(status.progress() * 100)
            # 20-segment text progress bar (one segment per 5%).
            progress_str = "[{0}{1}]\nProgress: {2}%\n".format(
                "".join(["█" for i in range(math.floor(percentage / 5))]),
                "".join(["░" for i in range(20 - math.floor(percentage / 5))]),
                round(percentage, 2)
            )
            current_message = f"Uploading to G-Drive:\nFile Name: `{file_name}`\n{progress_str}"
            # Only edit when the text changed, to avoid redundant API calls
            # and Telegram MessageNotModified errors.
            if display_message != current_message:
                try:
                    await event.edit(current_message)
                    display_message = current_message
                except Exception as e:
                    logger.info(str(e))
                    pass
    file_id = response.get("id")
    # Insert new permissions
    drive_service.permissions().insert(fileId=file_id, body=permissions).execute()
    # Define file instance and get url for download
    file = drive_service.files().get(fileId=file_id).execute()
    download_url = file.get("webContentLink")
    return download_url
#@command(pattern="^.gfolder ?(.*)")
@borg.on(admin_cmd(pattern=r"gfolder ?(.*)"))
async def _(event):
    """Handle ``.gfolder``: reply with the link to the configured G-Drive
    upload folder (GDRIVE_FOLDER_ID)."""
    if event.fwd_from:
        return
    folder_link = "https://drive.google.com/folderview?id={}".format(parent_id)
    await event.edit("`Here is Your G-Drive Folder link : `\n{}".format(folder_link))
| 43.1571 | 166 | 0.655793 |
f7e1bb96fe6c6fb13965fafab93d63f56939607d | 47,021 | py | Python | recipe/gen_patch_json.py | beckernick/conda-forge-repodata-patches-feedstock | f27bce69797ca4ea45ffd96068b7d68d208f6f8e | [
"BSD-3-Clause"
] | null | null | null | recipe/gen_patch_json.py | beckernick/conda-forge-repodata-patches-feedstock | f27bce69797ca4ea45ffd96068b7d68d208f6f8e | [
"BSD-3-Clause"
] | null | null | null | recipe/gen_patch_json.py | beckernick/conda-forge-repodata-patches-feedstock | f27bce69797ca4ea45ffd96068b7d68d208f6f8e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import copy
import json
import os
from os.path import join, isdir
import sys
import tqdm
import re
import requests
import pkg_resources
from get_license_family import get_license_family
CHANNEL_NAME = "conda-forge"
CHANNEL_ALIAS = "https://conda.anaconda.org"
SUBDIRS = (
"noarch",
"linux-64",
"linux-armv7l",
"linux-aarch64",
"linux-ppc64le",
"osx-64",
"osx-arm64",
"win-32",
"win-64",
)
REMOVALS = {
"noarch": (
"sendgrid-5.3.0-py_0.tar.bz2",
),
"linux-64": (
"airflow-with-gcp_api-1.9.0-1.tar.bz2",
"airflow-with-gcp_api-1.9.0-2.tar.bz2",
"airflow-with-gcp_api-1.9.0-3.tar.bz2",
"adios-1.13.1-py36hbecc8f4_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"gdk-pixbuf-2.36.9-0.tar.bz2",
"itk-4.12.0-py27_0.tar.bz2",
"itk-4.12.0-py35_0.tar.bz2",
"itk-4.12.0-py36_0.tar.bz2",
"itk-4.13.0-py27_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"itk-4.13.0-py36_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
"libtasn1-4.13-py36_0.tar.bz2",
"libgsasl-1.8.0-py36_1.tar.bz2",
"nipype-0.12.0-0.tar.bz2",
"nipype-0.12.0-py35_0.tar.bz2",
"postgis-2.4.3+9.6.8-0.tar.bz2",
"pyarrow-0.1.post-0.tar.bz2",
"pyarrow-0.1.post-1.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
"rapidpy-2.5.2-py36_0.tar.bz2",
"smesh-8.3.0b0-1.tar.bz2",
"statuspage-0.3.3-0.tar.bz2",
"statuspage-0.4.0-0.tar.bz2",
"statuspage-0.4.1-0.tar.bz2",
"statuspage-0.5.0-0.tar.bz2",
"statuspage-0.5.1-0.tar.bz2",
"tokenize-rt-2.0.1-py27_0.tar.bz2",
"vaex-core-0.4.0-py27_0.tar.bz2",
),
"osx-64": (
"adios-1.13.1-py36hbecc8f4_0.tar.bz2",
"airflow-with-gcp_api-1.9.0-1.tar.bz2",
"airflow-with-gcp_api-1.9.0-2.tar.bz2",
"arpack-3.6.1-blas_openblash1f444ea_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
"flask-rest-orm-0.5.0-py35_0.tar.bz2",
"flask-rest-orm-0.5.0-py36_0.tar.bz2",
"itk-4.12.0-py27_0.tar.bz2",
"itk-4.12.0-py35_0.tar.bz2",
"itk-4.12.0-py36_0.tar.bz2",
"itk-4.13.0-py27_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"itk-4.13.0-py36_0.tar.bz2",
"lammps-2018.03.16-.tar.bz2",
"libtasn1-4.13-py36_0.tar.bz2",
"mpb-1.6.2-1.tar.bz2",
"nipype-0.12.0-0.tar.bz2",
"nipype-0.12.0-py35_0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
"reentry-1.1.0-py27_0.tar.bz2",
"resampy-0.2.0-py27_0.tar.bz2",
"statuspage-0.3.3-0.tar.bz2",
"statuspage-0.4.0-0.tar.bz2",
"statuspage-0.4.1-0.tar.bz2",
"statuspage-0.5.0-0.tar.bz2",
"statuspage-0.5.1-0.tar.bz2",
"sundials-3.1.0-blas_openblash0edd121_202.tar.bz2",
"vlfeat-0.9.20-h470a237_2.tar.bz2",
"xtensor-python-0.19.1-h3e44d54_0.tar.bz2",
),
"osx-arm64": (
),
"win-32": (
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"glpk-4.59-py27_vc9_0.tar.bz2",
"glpk-4.59-py34_vc10_0.tar.bz2",
"glpk-4.59-py35_vc14_0.tar.bz2",
"glpk-4.60-py27_vc9_0.tar.bz2",
"glpk-4.60-py34_vc10_0.tar.bz2",
"glpk-4.60-py35_vc14_0.tar.bz2",
"glpk-4.61-py27_vc9_0.tar.bz2",
"glpk-4.61-py35_vc14_0.tar.bz2",
"glpk-4.61-py36_0.tar.bz2",
"libspatialindex-1.8.5-py27_0.tar.bz2",
"liknorm-1.3.7-py27_1.tar.bz2",
"liknorm-1.3.7-py35_1.tar.bz2",
"liknorm-1.3.7-py36_1.tar.bz2",
"nlopt-2.4.2-0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
),
"win-64": (
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"glpk-4.59-py27_vc9_0.tar.bz2",
"glpk-4.59-py34_vc10_0.tar.bz2",
"glpk-4.59-py35_vc14_0.tar.bz2",
"glpk-4.60-py27_vc9_0.tar.bz2",
"glpk-4.60-py34_vc10_0.tar.bz2",
"glpk-4.60-py35_vc14_0.tar.bz2",
"glpk-4.61-py27_vc9_0.tar.bz2",
"glpk-4.61-py35_vc14_0.tar.bz2",
"glpk-4.61-py36_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"libspatialindex-1.8.5-py27_0.tar.bz2",
"liknorm-1.3.7-py27_1.tar.bz2",
"liknorm-1.3.7-py35_1.tar.bz2",
"liknorm-1.3.7-py36_1.tar.bz2",
"nlopt-2.4.2-0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
),
}
OPERATORS = ["==", ">=", "<=", ">", "<", "!="]
OSX_SDK_FIXES = {
'nodejs-12.8.0-hec2bf70_1': '10.10',
'nodejs-12.1.0-h6de7cb9_1': '10.10',
'nodejs-12.3.1-h6de7cb9_0': '10.10',
'nodejs-12.9.0-hec2bf70_0': '10.10',
'nodejs-12.9.1-hec2bf70_0': '10.10',
'nodejs-12.7.0-hec2bf70_1': '10.10',
'nodejs-12.10.0-hec2bf70_0': '10.10',
'nodejs-12.4.0-h6de7cb9_0': '10.10',
'nodejs-12.11.1-hec2bf70_0': '10.10',
'nodejs-12.7.0-h6de7cb9_0': '10.10',
'nodejs-12.3.0-h6de7cb9_0': '10.10',
'nodejs-10.16.3-hec2bf70_0': '10.10',
'nodejs-12.12.0-hfddbe92_0': '10.10',
'nodejs-12.8.1-hec2bf70_0': '10.10',
'javafx-sdk-11.0.4-h6dcaf97_1': '10.11',
'javafx-sdk-12.0.2-h6dcaf97_1': '10.11',
'javafx-sdk-12.0.2-h6dcaf97_0': '10.11',
'javafx-sdk-11.0.4-h6dcaf97_0': '10.11',
'qt-5.12.1-h1b46049_0': '10.12',
'qt-5.9.7-h8cf7e54_3': '10.12',
'qt-5.9.7-h93ee506_0': '10.12',
'qt-5.9.7-h93ee506_1': '10.12',
'qt-5.12.5-h1b46049_0': '10.12',
'qt-5.9.7-h93ee506_2': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_2': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_1': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_0': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_2': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_0': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_1': '10.12',
'freecad-0.18.3-py37h4764a83_2': '10.12',
'freecad-0.18.3-py37hc453731_1': '10.12',
'freecad-0.18.4-py37hab2b3aa_1': '10.12',
'freecad-0.18.4-py37hab2b3aa_0': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_1': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_2': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_0': '10.12',
'openmpi-mpicc-4.0.1-h516909a_0': '10.12',
'openmpi-mpicc-4.0.1-h516909a_1': '10.12',
'openmpi-mpicc-4.0.1-h516909a_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_0': '10.12',
'openmpi-mpifort-4.0.1-h6ad152f_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_1': '10.12',
'openmpi-mpifort-4.0.1-he991be0_0': '10.12',
'openmpi-mpifort-4.0.1-he991be0_1': '10.12',
'openmpi-mpifort-4.0.1-he991be0_2': '10.12',
'reaktoro-1.0.7-py37h99eb986_0': '10.12',
'reaktoro-1.0.7-py37h99eb986_1': '10.12',
'reaktoro-1.0.7-py36h99eb986_0': '10.12',
'reaktoro-1.0.7-py36h99eb986_1': '10.12',
'pyqt-5.12.3-py38he22c54c_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_0': '10.12',
'pyqt-5.12.3-py36he22c54c_1': '10.12',
'pyqt-5.9.2-py27h2a560b1_4': '10.12',
'pyqt-5.9.2-py27h2a560b1_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_4': '10.12',
'pyqt-5.9.2-py36h2a560b1_3': '10.12',
'pyqt-5.9.2-py27h2a560b1_2': '10.12',
'pyqt-5.9.2-py36h2a560b1_1': '10.12',
'pyqt-5.12.3-py27h2a560b1_0': '10.12',
'pyqt-5.12.3-py37h2a560b1_0': '10.12',
'pyqt-5.12.3-py27he22c54c_0': '10.12',
'pyqt-5.12.3-py27he22c54c_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_2': '10.12',
'pyqt-5.9.2-py37h2a560b1_1': '10.12',
'pyqt-5.9.2-py36h2a560b1_0': '10.12',
'pyqt-5.9.2-py36h2a560b1_4': '10.12',
'pyqt-5.9.2-py27h2a560b1_0': '10.12',
'pyqt-5.9.2-py37h2a560b1_3': '10.12',
'pyqt-5.12.3-py38he22c54c_0': '10.12',
'pyqt-5.9.2-py27h2a560b1_3': '10.12',
'pyqt-5.9.2-py36h2a560b1_2': '10.12',
'pyqt-5.12.3-py37he22c54c_0': '10.12',
'pyqt-5.12.3-py36he22c54c_0': '10.12',
'pyqt-5.12.3-py37he22c54c_1': '10.12',
'pyqt-5.12.3-py36h2a560b1_0': '10.12',
'ldas-tools-al-2.6.3-hf543496_0': '10.12',
'ldas-tools-al-2.6.3-hf543496_1': '10.12',
'ldas-tools-al-2.6.4-h4f290e7_1': '10.12',
'ldas-tools-al-2.6.4-h4f290e7_0': '10.12',
'openmpi-4.0.1-ha90c164_2': '10.12',
'openmpi-4.0.1-ha90c164_0': '10.12',
'openmpi-4.0.1-hfcebdee_2': '10.12',
'openmpi-4.0.1-ha90c164_1': '10.12',
'openmpi-4.0.1-hc99cbb1_1': '10.12',
'openmpi-4.0.1-hc99cbb1_0': '10.12',
'openmpi-4.0.1-hc99cbb1_2': '10.12',
}
def _add_removals(instructions, subdir):
    """Extend ``instructions["remove"]`` with the hard-coded REMOVALS for
    *subdir* plus every package currently published under the
    conda-forge "broken" label for that subdir.
    """
    resp = requests.get(
        "https://conda.anaconda.org/conda-forge/"
        "label/broken/%s/repodata.json" % subdir
    )
    if resp.status_code != 200:
        resp.raise_for_status()
    # Union of the static list and the broken-label packages, de-duplicated.
    removals = set(REMOVALS.get(subdir, []))
    removals.update(resp.json()["packages"])
    instructions["remove"].extend(tuple(removals))
def _gen_patch_instructions(index, new_index, subdir):
    """Diff the original and patched package indices into a repodata
    patch-instructions dict (changed/added keys per filename, plus the
    removal list from ``_add_removals``).
    """
    instructions = {
        "patch_instructions_version": 1,
        "packages": defaultdict(dict),
        "revoke": [],
        "remove": [],
    }
    _add_removals(instructions, subdir)
    # Record every key whose value changed, and every newly-added key.
    for fn, old_record in index.items():
        assert fn in new_index
        new_record = new_index[fn]
        for key, old_value in old_record.items():
            assert key in new_record, (key, old_record, new_record)
            if old_value != new_record[key]:
                instructions['packages'][fn][key] = new_record[key]
        for key, new_value in new_record.items():
            if key not in old_record:
                instructions['packages'][fn][key] = new_value
    return instructions
def has_dep(record, name):
    """Return True if *record* lists *name* (by package name, ignoring
    version/build constraints) in its "depends" entries."""
    for spec in record.get('depends', ()):
        if spec.split(' ')[0] == name:
            return True
    return False
def get_python_abi(version, subdir, build=None):
    """Map a Python *version* string to its CPython ABI tag (e.g. "cp37m").

    If *build* contains a "pyXY" marker (e.g. "py37h1234_0"), that marker
    overrides *version*. Python 2 tags are wide-unicode ("mu") on linux
    subdirs only; from 3.8 onward the "m" suffix is dropped. Returns None
    for unrecognized versions.

    Fix: the build-string regex is now a raw string — ``".*py\\d\\d"`` was a
    non-raw literal with an invalid escape sequence (DeprecationWarning,
    SyntaxError in future Python versions).
    """
    if build is not None:
        # Greedy ".*" means the LAST pyXY occurrence in the build wins,
        # matching the original behavior.
        m = re.match(r".*py(\d)(\d)", build)
        if m:
            version = "{}.{}".format(m.group(1), m.group(2))
    if version.startswith("2.7"):
        return "cp27mu" if subdir.startswith("linux") else "cp27m"
    if version.startswith("2.6"):
        return "cp26mu" if subdir.startswith("linux") else "cp26m"
    for prefix, tag in (
        ("3.4", "cp34m"),
        ("3.5", "cp35m"),
        ("3.6", "cp36m"),
        ("3.7", "cp37m"),
        ("3.8", "cp38"),
        ("3.9", "cp39"),
    ):
        if version.startswith(prefix):
            return tag
    return None
# Workaround for https://github.com/conda/conda-build/pull/3868
def remove_python_abi(record):
    """Strip any "python_abi" entry from ``record["depends"]`` in place.

    Workaround for https://github.com/conda/conda-build/pull/3868; the
    python / python_abi / pypy records themselves are left untouched, and
    records with no python_abi dependency are not modified at all.
    """
    if record['name'] in ['python', 'python_abi', 'pypy']:
        return
    deps = record.get('depends', [])
    kept = [d for d in deps if d.split(" ")[0] != "python_abi"]
    if len(kept) == len(deps):
        # No python_abi dependency present; leave the record unmodified.
        return
    record['depends'] = kept
changes = set([])
def add_python_abi(record, subdir):
    """Add a ``python_abi`` constraint to *record* so CPython-built packages
    conflict with PyPy installs.

    Relies on ``cb_pin_regex`` and ``pad_list`` defined elsewhere in this
    file (not visible in this chunk) — presumably cb_pin_regex parses
    conda-build pins of the form ">=X.Y,<X.Z" into "lower"/"upper" groups,
    and pad_list pads a version-component list to a minimum length; verify
    against the rest of the file.
    """
    record_name = record['name']
    # Make existing python and python-dependent packages conflict with pypy
    if record_name == "python" and not record['build'].endswith("pypy"):
        version = record['version']
        new_constrains = record.get('constrains', [])
        python_abi = get_python_abi(version, subdir)
        new_constrains.append(f"python_abi * *_{python_abi}")
        record['constrains'] = new_constrains
        return
    if has_dep(record, 'python') and not has_dep(record, 'pypy') and not has_dep(record, 'python_abi'):
        python_abi = None
        new_constrains = record.get('constrains', [])
        build = record["build"]
        # strict = we pinned a concrete ABI; relax = python dep too loose to
        # pin, so we fall back to a blanket pypy conflict below.
        ver_strict_found = False
        ver_relax_found = False
        for dep in record.get('depends', []):
            dep_split = dep.split(' ')
            if dep_split[0] == 'python':
                if len(dep_split) == 3:
                    # "python <ver> <build>": already a fully pinned dep.
                    continue
                if len(dep_split) == 1:
                    # bare "python": no version information at all.
                    continue
                elif dep_split[1] == "<3":
                    python_abi = get_python_abi("2.7", subdir, build)
                elif dep_split[1].startswith(">="):
                    m = cb_pin_regex.match(dep_split[1])
                    if m == None:
                        python_abi = get_python_abi("", subdir, build)
                    else:
                        lower = pad_list(m.group("lower").split("."), 2)[:2]
                        upper = pad_list(m.group("upper").split("."), 2)[:2]
                        # Only a single-minor-version pin (e.g. >=3.7,<3.8)
                        # identifies one ABI unambiguously.
                        if lower[0] == upper[0] and int(lower[1]) + 1 == int(upper[1]):
                            python_abi = get_python_abi(m.group("lower"), subdir, build)
                        else:
                            python_abi = get_python_abi("", subdir, build)
                else:
                    python_abi = get_python_abi(dep_split[1], subdir, build)
                if python_abi:
                    new_constrains.append(f"python_abi * *_{python_abi}")
                    # Record the rewrite in the module-level `changes` set.
                    changes.add((dep, f"python_abi * *_{python_abi}"))
                    ver_strict_found = True
                else:
                    ver_relax_found = True
        if not ver_strict_found and ver_relax_found:
            # Could not pin an ABI: at least forbid pypy outright.
            new_constrains.append("pypy <0a0")
        record['constrains'] = new_constrains
def _gen_new_index(repodata, subdir):
    """Make any changes to the index by adjusting the values directly.

    This function returns the new index with the adjustments.
    Finally, the new and old indices are then diff'ed to produce the repo
    data patches.
    """
    # Work on a deep copy so the caller's repodata stays pristine for diffing.
    index = copy.deepcopy(repodata["packages"])

    # deal with windows vc features
    if subdir.startswith("win-"):
        # Map a python minor version prefix to the matching MSVC runtime pin.
        python_vc_deps = {
            '2.6': 'vc 9.*',
            '2.7': 'vc 9.*',
            '3.3': 'vc 10.*',
            '3.4': 'vc 10.*',
            '3.5': 'vc 14.*',
            '3.6': 'vc 14.*',
            '3.7': 'vc 14.*',
        }
        for fn, record in index.items():
            record_name = record['name']
            if record_name == 'python':
                # remove the track_features key (setting to None patches it out)
                if 'track_features' in record:
                    record['track_features'] = None
                # add a vc dependency
                if not any(d.startswith('vc') for d in record['depends']):
                    depends = record['depends']
                    depends.append(python_vc_deps[record['version'][:3]])
                    record['depends'] = depends
            elif 'vc' in record.get('features', ''):
                # remove vc from the features key
                vc_version = _extract_and_remove_vc_feature(record)
                if vc_version:
                    # add a vc dependency
                    if not any(d.startswith('vc') for d in record['depends']):
                        depends = record['depends']
                        depends.append('vc %d.*' % vc_version)
                        record['depends'] = depends

    # Packages whose "proj.4" dependency must be renamed to "proj4".
    proj4_fixes = {"cartopy", "cdo", "gdal", "libspatialite", "pynio", "qgis"}
    for fn, record in index.items():
        record_name = record["name"]
        # Timestamps are epoch milliseconds; this cutoff is ~2020-11-03,
        # presumably the python_abi migration date — verify against feedstock history.
        if record.get('timestamp', 0) < 1604417730000:
            if subdir == 'noarch':
                remove_python_abi(record)
            else:
                add_python_abi(record, subdir)
        if "license" in record and "license_family" not in record and record["license"]:
            family = get_license_family(record["license"])
            if family:
                record['license_family'] = family
        # remove dependency from constrains for twisted
        # (the misspelled "cococa" matches the bad constraint as published)
        if record_name == "twisted":
            new_constrains = [dep for dep in record.get('constrains', ())
                              if not dep.startswith("pyobjc-framework-cococa")]
            if new_constrains != record.get('constrains', ()):
                record['constrains'] = new_constrains
        if record_name == "starlette-base":
            if not any(dep.split(' ')[0] == "starlette" for dep in record.get('constrains', ())):
                if 'constrains' in record:
                    record['constrains'].append(f"starlette {record['version']}")
                else:
                    record['constrains'] = [f"starlette {record['version']}"]
        if record_name == "pytorch" and record.get('timestamp', 0) < 1610297816658:
            # https://github.com/conda-forge/pytorch-cpu-feedstock/issues/29
            if not any(dep.split(' ')[0] == 'typing_extensions'
                       for dep in record.get('depends', ())):
                if 'depends' in record:
                    record['depends'].append("typing_extensions")
                else:
                    record['depends'] = ["typing_extensions"]
        if record_name == "ipython" and record.get('timestamp', 0) < 1609621539000:
            # https://github.com/conda-forge/ipython-feedstock/issues/127
            if any(dep.split(' ')[0] == "jedi" for dep in record.get('depends', ())):
                record['depends'].append('jedi <0.18')
        if record_name == "kartothek" and record.get('timestamp', 0) < 1611565264000:
            # https://github.com/conda-forge/kartothek-feedstock/issues/36
            if "zstandard" in record['depends']:
                i = record['depends'].index('zstandard')
                record['depends'][i] = 'zstandard <0.15'
        if record_name == "gitdb" and record['version'].startswith('4.0.') and 'smmap >=3.0.1' in record['depends']:
            i = record['depends'].index('smmap >=3.0.1')
            record['depends'][i] = 'smmap >=3.0.1,<4'
        if record_name == "arrow-cpp":
            if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
                if 'constrains' in record:
                    record['constrains'].append("arrow-cpp-proc * cpu")
                else:
                    record['constrains'] = ["arrow-cpp-proc * cpu"]
            if "aws-sdk-cpp" in record['depends']:
                i = record['depends'].index('aws-sdk-cpp')
                record['depends'][i] = 'aws-sdk-cpp 1.7.164'
        if record_name == "pyarrow":
            if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
                if 'constrains' in record:
                    record['constrains'].append("arrow-cpp-proc * cpu")
                else:
                    record['constrains'] = ["arrow-cpp-proc * cpu"]
        if record_name == "kartothek":
            if record["version"] in ["3.15.0", "3.15.1", "3.16.0"] \
                    and "pyarrow >=0.13.0,!=0.14.0,<2" in record["depends"]:
                i = record["depends"].index("pyarrow >=0.13.0,!=0.14.0,<2")
                record["depends"][i] = "pyarrow >=0.17.1,<2"
        # distributed <2.11.0 does not work with msgpack-python >=1.0
        # newer versions of distributed require at least msgpack-python >=0.6.0
        # so we can fix cases where msgpack-python is unbounded
        # https://github.com/conda-forge/distributed-feedstock/pull/114
        if record_name == 'distributed':
            if 'msgpack-python' in record['depends']:
                i = record['depends'].index('msgpack-python')
                record['depends'][i] = 'msgpack-python <1.0.0'
        # python-language-server <=0.31.9 requires pyflakes <2.2.2
        # included explicitly in 0.31.10+
        # https://github.com/conda-forge/python-language-server-feedstock/pull/50
        version = record['version']
        if record_name == 'python-language-server':
            pversion = pkg_resources.parse_version(version)
            v0_31_9 = pkg_resources.parse_version('0.31.9')
            if pversion <= v0_31_9 and 'pyflakes >=1.6.0' in record['depends']:
                i = record['depends'].index('pyflakes >=1.6.0')
                record['depends'][i] = 'pyflakes >=1.6.0,<2.2.0'
        # aioftp >=0.17.0 requires python >=3.7
        # aioftp 0.17.x was incorrectly built with 3.6 support
        # https://github.com/conda-forge/aioftp-feedstock/pull/12
        version = record['version']
        if record_name == 'aioftp':
            pversion = pkg_resources.parse_version(version)
            base_version = pkg_resources.parse_version('0.17.0')
            max_version = pkg_resources.parse_version('0.17.2')
            if base_version <= pversion <= max_version and 'python >=3.6' in record['depends']:
                i = record['depends'].index('python >=3.6')
                record['depends'][i] = 'python >=3.7'
        # numpydoc >=1.0.0 requires python >=3.5
        # https://github.com/conda-forge/numpydoc-feedstock/pull/14
        version = record['version']
        if record_name == 'numpydoc':
            pversion = pkg_resources.parse_version(version)
            v1_0_0 = pkg_resources.parse_version('1.0.0')
            v1_1_0 = pkg_resources.parse_version('1.1.0')
            if v1_0_0 <= pversion <= v1_1_0 and 'python' in record['depends']:
                i = record['depends'].index('python')
                record['depends'][i] = 'python >=3.5'
        # pip >=21 requires python >=3.6 but the first build has >=3
        # https://github.com/conda-forge/pip-feedstock/pull/68
        if record_name == 'pip':
            if record['version'] == "21.0" and record['build'] == "pyhd8ed1ab_0":
                i = record['depends'].index('python >=3')
                record['depends'][i] = 'python >=3.6'
        # fix deps with wrong names
        if record_name in proj4_fixes:
            _rename_dependency(fn, record, "proj.4", "proj4")
        if record_name == "airflow-with-async":
            _rename_dependency(fn, record, "evenlet", "eventlet")
        if record_name == "iris":
            _rename_dependency(fn, record, "nc_time_axis", "nc-time-axis")
        if (record_name == "r-base" and
                not any(dep.startswith("_r-mutex ")
                        for dep in record["depends"])):
            depends = record["depends"]
            depends.append("_r-mutex 1.* anacondar_1")
            record["depends"] = depends
        if record_name == "gcc_impl_{}".format(subdir):
            _relax_exact(fn, record, "binutils_impl_{}".format(subdir))
        deps = record.get("depends", ())
        if "ntl" in deps and record_name != "sage":
            _rename_dependency(fn, record, "ntl", "ntl 10.3.0")
        if "libiconv >=1.15,<1.16.0a0" in deps:
            _pin_looser(fn, record, "libiconv", upper_bound="1.17.0")
        if 're2' in deps and record.get('timestamp', 0) < 1588349339243:
            _rename_dependency(fn, record, "re2", "re2 <2020.05.01")
        if 'libffi' in deps and record.get('timestamp', 0) < 1605980936031:
            _rename_dependency(fn, record, "libffi", "libffi <3.3.0.a0")
        if 'libffi >=3.2.1,<4.0a0' in deps and record.get('timestamp', 0) < 1605980936031:
            _pin_stricter(fn, record, "libffi", "x.x")
        _relax_libssh2_1_x_pinning(fn, record)
        if any(dep.startswith("gf2x") for dep in deps):
            _pin_stricter(fn, record, "gf2x", "x.x")
        if any(dep.startswith("libnetcdf >=4.7.3") for dep in deps):
            _pin_stricter(fn, record, "libnetcdf", "x.x.x.x")
        if any(dep.startswith("libarchive >=3.3") for dep in deps):
            _pin_looser(fn, record, "libarchive", upper_bound="3.6.0")
        # fix only packages built before the run_exports was corrected.
        if any(dep == "libflang" or dep.startswith("libflang >=5.0.0") for dep in deps) and record.get('timestamp', 0) < 1611789153000:
            record["depends"].append("libflang <6.0.0.a0")
        if any(dep.startswith("libignition-") or dep == 'libsdformat' for dep in deps):
            for dep_idx, _ in enumerate(deps):
                dep = record['depends'][dep_idx]
                if dep.startswith('libignition-'):
                    _pin_looser(fn, record, dep.split(" ")[0], max_pin="x")
                if dep.startswith('libsdformat '):
                    _pin_looser(fn, record, dep.split(" ")[0], max_pin="x")
        # this doesn't seem to match the _pin_looser or _pin_stricter patterns
        # nor _replace_pin
        if record_name == "jedi" and record.get("timestamp", 0) < 1592619891258:
            for i, dep in enumerate(record["depends"]):
                if dep.startswith("parso") and "<" not in dep:
                    _dep_parts = dep.split(" ")
                    _dep_parts[1] = _dep_parts[1] + ",<0.8.0"
                    record["depends"][i] = " ".join(_dep_parts)
        # FIXME: disable patching-out blas_openblas feature
        # because hotfixes are not applied to gcc7 label
        # causing inconsistent behavior
        # if (record_name == "blas" and
        #         record["track_features"] == "blas_openblas"):
        #     instructions["packages"][fn]["track_features"] = None
        # if "features" in record:
        #     if "blas_openblas" in record["features"]:
        #         # remove blas_openblas feature
        #         instructions["packages"][fn]["features"] = _extract_feature(
        #             record, "blas_openblas")
        #         if not any(d.startswith("blas ") for d in record["depends"]):
        #             depends = record['depends']
        #             depends.append("blas 1.* openblas")
        #             instructions["packages"][fn]["depends"] = depends
        if any(dep.startswith("zstd >=1.4") for dep in deps):
            _pin_looser(fn, record, "zstd", max_pin="x.x")
        # We pin MPI packages loosely so as to rely on their ABI compatibility
        if any(dep.startswith("openmpi >=4.0") for dep in deps):
            _pin_looser(fn, record, "openmpi", upper_bound="5.0")
        if any(dep.startswith("mpich >=3.3") for dep in deps):
            _pin_looser(fn, record, "mpich", upper_bound="4.0")
        _replace_pin('libunwind >=1.2.1,<1.3.0a0', 'libunwind >=1.2.1,<2.0.0a0', deps, record)
        _replace_pin('snappy >=1.1.7,<1.1.8.0a0', 'snappy >=1.1.7,<2.0.0.0a0', deps, record)
        _replace_pin('ncurses >=6.1,<6.2.0a0', 'ncurses >=6.1,<6.3.0a0', deps, record)
        _replace_pin('abseil-cpp', 'abseil-cpp =20190808', deps, record)
        if record_name not in ["blas", "libblas", "libcblas", "liblapack",
                               "liblapacke", "lapack", "blas-devel"]:
            _replace_pin('liblapack >=3.8.0,<3.9.0a0', 'liblapack >=3.8.0,<4.0.0a0', deps, record)
            _replace_pin('liblapacke >=3.8.0,<3.9.0a0', 'liblapacke >=3.8.0,<4.0.0a0', deps, record)
        # Filter by timestamp as pythia8 also contains python bindings that shouldn't be pinned
        if 'pythia8' in deps and record.get('timestamp', 0) < 1584264455759:
            i = record['depends'].index('pythia8')
            record['depends'][i] = 'pythia8 >=8.240,<8.300.0a0'
        # remove features for openjdk and rb2
        if ("track_features" in record and
                record['track_features'] is not None):
            for feat in record["track_features"].split():
                if feat.startswith(("rb2", "openjdk")):
                    record["track_features"] = _extract_track_feature(
                        record, feat)
        llvm_pkgs = ["libclang", "clang", "clang-tools", "llvm", "llvm-tools", "llvmdev"]
        for llvm in ["libllvm8", "libllvm9"]:
            if any(dep.startswith(llvm) for dep in deps):
                if record_name not in llvm_pkgs:
                    _relax_exact(fn, record, llvm, max_pin="x.x")
                else:
                    _relax_exact(fn, record, llvm, max_pin="x.x.x")
        if record_name in llvm_pkgs:
            new_constrains = record.get('constrains', [])
            version = record["version"]
            for pkg in llvm_pkgs:
                if record_name == pkg:
                    continue
                # NOTE(review): new_constrains is a *list*, so if a bare
                # package name were ever present, `del new_constrains[pkg]`
                # would raise TypeError (str is not an index). In practice
                # constraints always carry version specs so this branch never
                # fires — confirm before relying on it.
                if pkg in new_constrains:
                    del new_constrains[pkg]
                if any(constraint.startswith(f"{pkg} ") for constraint in new_constrains):
                    continue
                new_constrains.append(f'{pkg} {version}.*')
            record['constrains'] = new_constrains
        # make sure the libgfortran version is bound from 3 to 4 for osx
        if subdir == "osx-64":
            _fix_libgfortran(fn, record)
            _fix_libcxx(fn, record)
            full_pkg_name = fn.replace('.tar.bz2', '')
            if full_pkg_name in OSX_SDK_FIXES:
                _set_osx_virt_min(fn, record, OSX_SDK_FIXES[full_pkg_name])
        # make old binutils packages conflict with the new sysroot packages
        # that have renamed the sysroot from conda_cos6 or conda_cos7 to just
        # conda
        if (
            subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
            and record_name in [
                "binutils", "binutils_impl_" + subdir, "ld_impl_" + subdir]
            and record.get('timestamp', 0) < 1589953178153  # 2020-05-20
        ):
            new_constrains = record.get('constrains', [])
            new_constrains.append("sysroot_" + subdir + " ==99999999999")
            record["constrains"] = new_constrains
        # make sure the old compilers conflict with the new sysroot packages
        # and they only use libraries from the old compilers
        if (
            subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
            and record_name in [
                "gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
            and record['version'] in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
        ):
            new_constrains = record.get('constrains', [])
            for pkg in ["libgcc-ng", "libstdcxx-ng", "libgfortran", "libgomp"]:
                new_constrains.append("{} 5.4.*|7.2.*|7.3.*|8.2.*|9.1.*|9.2.*".format(pkg))
            new_constrains.append("binutils_impl_" + subdir + " <2.34")
            new_constrains.append("ld_impl_" + subdir + " <2.34")
            new_constrains.append("sysroot_" + subdir + " ==99999999999")
            record["constrains"] = new_constrains
        # we pushed a few builds of the compilers past the list of versions
        # above which do not use the sysroot packages - this block catches those
        # it will also break some test builds of the new compilers but we should
        # not be using those anyways and they are marked as broken.
        if (
            subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
            and record_name in [
                "gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
            and record['version'] not in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
            and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
        ):
            new_constrains = record.get('constrains', [])
            new_constrains.append("sysroot_" + subdir + " ==99999999999")
            record["constrains"] = new_constrains
        # all ctng activation packages that don't depend on the sysroot_*
        # packages are not compatible with the new sysroot_*-based compilers
        # root and cling must also be included as they have a builtin C++ interpreter
        if (
            subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
            and record_name in [
                "gcc_" + subdir, "gxx_" + subdir, "gfortran_" + subdir,
                "binutils_" + subdir, "gcc_bootstrap_" + subdir, "root_base", "cling"]
            and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
        ):
            new_constrains = record.get('constrains', [])
            new_constrains.append("sysroot_" + subdir + " ==99999999999")
            record["constrains"] = new_constrains
        # old CDTs with the conda_cos6 or conda_cos7 name in the sysroot need to
        # conflict with the new CDT and compiler packages
        # all of the new CDTs and compilers depend on the sysroot_{subdir} packages
        # so we use a constraint on those
        if (
            subdir == "noarch"
            and (
                record_name.endswith("-cos6-x86_64") or
                record_name.endswith("-cos7-x86_64") or
                record_name.endswith("-cos7-aarch64") or
                record_name.endswith("-cos7-ppc64le")
            )
            and not record_name.startswith("sysroot-")
            and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
        ):
            # The guard above guarantees exactly one of these suffixes matched,
            # so sys_subdir is always bound here.
            if record_name.endswith("x86_64"):
                sys_subdir = "linux-64"
            elif record_name.endswith("aarch64"):
                sys_subdir = "linux-aarch64"
            elif record_name.endswith("ppc64le"):
                sys_subdir = "linux-ppc64le"
            new_constrains = record.get('constrains', [])
            if not any(__r.startswith("sysroot_") for __r in new_constrains):
                new_constrains.append("sysroot_" + sys_subdir + " ==99999999999")
                record["constrains"] = new_constrains
        # make sure pybind11 and pybind11-global have run constraints on
        # the abi metapackage
        # see https://github.com/conda-forge/conda-forge-repodata-patches-feedstock/issues/104  # noqa
        if (
            record_name in ["pybind11", "pybind11-global"]
            # this version has a constraint sometimes
            and (
                pkg_resources.parse_version(record["version"])
                <= pkg_resources.parse_version("2.6.1")
            )
            and not any(
                c.startswith("pybind11-abi ")
                for c in record.get("constrains", [])
            )
        ):
            _add_pybind11_abi_constraint(fn, record)
        # add *lal>=7.1.1 as run_constrained for liblal-7.1.1
        if (
            record_name == "liblal"
            and record['version'] == "7.1.1"
            and record['build_number'] in (0, 1, 2, 100, 101, 102)
        ):
            record.setdefault('constrains', []).extend((
                "lal >=7.1.1",
                "python-lal >=7.1.1",
            ))
    return index
def _add_pybind11_abi_constraint(fn, record):
    """Pin the pybind11-abi internals version matching this pybind11 release.

    pybind11 internals-version history:
      < 2.2.0          -> 0
      2.2.0 - 2.2.3    -> 1
      2.2.4 - 2.2.x    -> 2
      2.3.0 - 2.4.x    -> 3
      2.5.0 - 2.6.1    -> 4
    Versions newer than 2.6.1 should already carry the constraint, so they
    raise rather than being guessed at. Adds (or overwrites) a
    ``pybind11-abi ==<N>`` entry in record['constrains'].
    """
    version = pkg_resources.parse_version(record["version"])
    # (exclusive upper bound, abi string) pairs, in ascending order.
    thresholds = (
        ("2.2.0", "0"),
        ("2.2.4", "1"),
        ("2.3.0", "2"),
        ("2.5.0", "3"),
    )
    abi_ver = None
    for upper, abi in thresholds:
        if version < pkg_resources.parse_version(upper):
            abi_ver = abi
            break
    if abi_ver is None:
        if version <= pkg_resources.parse_version("2.6.1"):
            abi_ver = "4"
        else:
            # anything newer should already have the constraint in repodata
            raise RuntimeError(
                "pybind11 version %s out of range for abi" % record["version"]
            )

    pin = "pybind11-abi ==" + abi_ver
    constrains = record.get("constrains", [])
    # Overwrite the last existing pybind11-abi entry, or append a new one.
    found_idx = None
    for idx, entry in enumerate(constrains):
        if entry.startswith("pybind11-abi "):
            found_idx = idx
    if found_idx is None:
        constrains.append(pin)
    else:
        constrains[found_idx] = pin
    record["constrains"] = constrains
def _replace_pin(old_pin, new_pin, deps, record):
"""Replace an exact pin with a new one."""
if old_pin in deps:
i = record['depends'].index(old_pin)
record['depends'][i] = new_pin
def _rename_dependency(fn, record, old_name, new_name):
depends = record["depends"]
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == old_name),
None
)
if dep_idx is not None:
parts = depends[dep_idx].split(" ")
remainder = (" " + " ".join(parts[1:])) if len(parts) > 1 else ""
depends[dep_idx] = new_name + remainder
record['depends'] = depends
def _fix_libgfortran(fn, record):
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libgfortran"),
None
)
if dep_idx is not None:
# make sure respect minimum versions still there
# 'libgfortran' -> >=3.0.1,<4.0.0.a0
# 'libgfortran ==3.0.1' -> ==3.0.1
# 'libgfortran >=3.0' -> >=3.0,<4.0.0.a0
# 'libgfortran >=3.0.1' -> >=3.0.1,<4.0.0.a0
if ("==" in depends[dep_idx]) or ("<" in depends[dep_idx]):
pass
elif depends[dep_idx] == "libgfortran":
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0.1" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0,<4.0.0.a0"
record['depends'] = depends
elif ">=4" in depends[dep_idx]:
# catches all of 4.*
depends[dep_idx] = "libgfortran >=4.0.0,<5.0.0.a0"
record['depends'] = depends
def _set_osx_virt_min(fn, record, min_vers):
rconst = record.get("constrains", ())
dep_idx = next(
(q for q, dep in enumerate(rconst)
if dep.split(' ')[0] == "__osx"),
None
)
run_constrained = list(rconst)
if dep_idx is None:
run_constrained.append("__osx >=%s" % min_vers)
if run_constrained:
record['constrains'] = run_constrained
def _fix_libcxx(fn, record):
record_name = record["name"]
if not record_name in ["cctools", "ld64", "llvm-lto-tapi"]:
return
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libcxx"),
None
)
if dep_idx is not None:
dep_parts = depends[dep_idx].split(" ")
if len(dep_parts) >= 2 and dep_parts[1] == "4.0.1":
# catches all of 4.*
depends[dep_idx] = "libcxx >=4.0.1"
record['depends'] = depends
def pad_list(l, num):
    """Right-pad l with "0" entries up to length num; no-op if long enough."""
    shortfall = num - len(l)
    if shortfall <= 0:
        return l
    return l + ["0"] * shortfall
def get_upper_bound(version, max_pin):
    """Return the exclusive upper version bound implied by a max_pin spec.

    ``max_pin`` is a conda-build-style mask like "x.x": as many components
    of ``version`` are kept as there are x's, the last kept component is
    incremented, and any remaining components are zeroed.
    E.g. ("1.2.3", "x.x") -> "1.3.0".
    """
    keep = max_pin.count("x")
    parts = pad_list(version.split("."), keep)
    head = parts[:keep]
    head[-1] = str(int(head[-1]) + 1)
    return ".".join(head + ["0"] * (len(parts) - keep))
def _relax_exact(fn, record, fix_dep, max_pin=None):
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == fix_dep),
None
)
if dep_idx is not None:
dep_parts = depends[dep_idx].split(" ")
if (len(dep_parts) == 3 and \
not any(dep_parts[1].startswith(op) for op in OPERATORS)):
if max_pin is not None:
upper_bound = get_upper_bound(dep_parts[1], max_pin) + "a0"
depends[dep_idx] = "{} >={},<{}".format(*dep_parts[:2], upper_bound)
else:
depends[dep_idx] = "{} >={}".format(*dep_parts[:2])
record['depends'] = depends
def _match_strict_libssh2_1_x_pin(dep):
if dep.startswith("libssh2 >=1.8.0,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.1,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.2,<1.9.0a0"):
return True
if dep.startswith("libssh2 1.8.*"):
return True
return False
def _relax_libssh2_1_x_pinning(fn, record):
    """Widen the first over-strict libssh2 1.8.x pin to >=1.8.0,<2.0.0a0.

    libssh2 1.x releases are ABI compatible, so the strict <1.9 bound was
    unnecessary. Mutates the depends list in place; only the first matching
    entry is rewritten.
    """
    depends = record.get("depends", ())
    for pos, dep in enumerate(depends):
        if _match_strict_libssh2_1_x_pin(dep):
            depends[pos] = "libssh2 >=1.8.0,<2.0.0a0"
            break
cb_pin_regex = re.compile(r"^>=(?P<lower>\d(\.\d+)*a?),<(?P<upper>\d(\.\d+)*)a0$")
def _pin_stricter(fn, record, fix_dep, max_pin):
    """Tighten every ">=lower,<upper a0" pin on fix_dep down to max_pin.

    For each dependency entry on ``fix_dep`` whose version field matches
    ``cb_pin_regex``, derive the upper bound implied by ``max_pin`` (e.g.
    "x.x") from the existing lower bound and substitute it only when it is
    *tighter* than the current bound. Any build-string field is preserved.
    Mutates ``record['depends']``.
    """
    depends = record.get("depends", ())
    dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
    for dep_idx in dep_indices:
        dep_parts = depends[dep_idx].split(" ")
        if len(dep_parts) not in [2, 3]:
            continue
        m = cb_pin_regex.match(dep_parts[1])
        if m is None:
            continue
        lower = m.group("lower")
        upper = m.group("upper").split(".")
        new_upper = get_upper_bound(lower, max_pin).split(".")
        upper = pad_list(upper, len(new_upper))
        new_upper = pad_list(new_upper, len(upper))
        # NOTE(review): these are tuples of *strings*, compared
        # lexicographically — e.g. "10" < "9". Fine for the single-digit
        # versions patched here, but verify before reusing more widely.
        if tuple(upper) > tuple(new_upper):
            if str(new_upper[-1]) != "0":
                new_upper += ["0"]
            depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
            if len(dep_parts) == 3:
                depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
            record['depends'] = depends
def _pin_looser(fn, record, fix_dep, max_pin=None, upper_bound=None):
    """Loosen every ">=lower,<upper a0" pin on fix_dep.

    The replacement upper bound comes either from ``upper_bound`` directly or
    is derived from the existing lower bound via ``max_pin`` (e.g. "x.x").
    The pin is rewritten only when the new bound is *looser* than the current
    one; any build-string field is preserved. Mutates ``record['depends']``.
    """
    depends = record.get("depends", ())
    dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
    for dep_idx in dep_indices:
        dep_parts = depends[dep_idx].split(" ")
        if len(dep_parts) not in [2, 3]:
            continue
        m = cb_pin_regex.match(dep_parts[1])
        if m is None:
            continue
        lower = m.group("lower")
        upper = m.group("upper").split(".")
        if upper_bound is None:
            new_upper = get_upper_bound(lower, max_pin).split(".")
        else:
            new_upper = upper_bound.split(".")
        upper = pad_list(upper, len(new_upper))
        new_upper = pad_list(new_upper, len(upper))
        # NOTE(review): string-tuple comparison is lexicographic ("10" < "9");
        # acceptable for the versions patched here — see _pin_stricter.
        if tuple(upper) < tuple(new_upper):
            if str(new_upper[-1]) != "0":
                new_upper += ["0"]
            depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
            if len(dep_parts) == 3:
                depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
            record['depends'] = depends
def _extract_and_remove_vc_feature(record):
features = record.get('features', '').split()
vc_features = tuple(f for f in features if f.startswith('vc'))
if not vc_features:
return None
non_vc_features = tuple(f for f in features if f not in vc_features)
vc_version = int(vc_features[0][2:]) # throw away all but the first
if non_vc_features:
record['features'] = ' '.join(non_vc_features)
else:
record['features'] = None
return vc_version
def _extract_feature(record, feature_name):
features = record.get('features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def _extract_track_feature(record, feature_name):
features = record.get('track_features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def main():
    """Download repodata per subdir, patch it, and write diff instructions.

    For each subdir: fetch repodata_from_packages.json, build the adjusted
    index with _gen_new_index, diff it against the original via
    _gen_patch_instructions, and write patch_instructions.json under
    $PREFIX/<subdir> (default "tmp/<subdir>") so the files get bundled.
    """
    # Step 1. Collect initial repodata for all subdirs.
    if "CF_SUBDIR" in os.environ:
        # For local debugging
        subdirs = os.environ["CF_SUBDIR"].split(";")
    else:
        subdirs = SUBDIRS
    repodatas = {}
    for subdir in tqdm.tqdm(subdirs, desc="Downloading repodata"):
        url = "/".join(
            (CHANNEL_ALIAS, CHANNEL_NAME, subdir, "repodata_from_packages.json"))
        response = requests.get(url)
        response.raise_for_status()
        repodatas[subdir] = response.json()

    # Step 2. Create all patch instructions.
    prefix_dir = os.getenv("PREFIX", "tmp")
    for subdir in subdirs:
        out_dir = join(prefix_dir, subdir)
        if not isdir(out_dir):
            os.makedirs(out_dir)
        # Step 2a. Generate a new index.
        new_index = _gen_new_index(repodatas[subdir], subdir)
        # Step 2b. Generate the instructions by diff'ing the indices.
        instructions = _gen_patch_instructions(
            repodatas[subdir]['packages'], new_index, subdir)
        # Step 2c. Output this to $PREFIX so that we bundle the JSON files.
        out_path = join(out_dir, "patch_instructions.json")
        with open(out_path, 'w') as fh:
            json.dump(instructions, fh, indent=2,
                      sort_keys=True, separators=(',', ': '))
if __name__ == "__main__":
    # Propagate main()'s return value as the exit status (None -> 0).
    sys.exit(main())
| 39.949873 | 135 | 0.564344 |
9bf430dd43c0a575447d5aad59fac81add20bcfd | 50,375 | py | Python | tencentcloud/ams/v20200608/models.py | xuzixx/tencentcloud-sdk-python | 98866ab9fd104cd6475b62fe78ff3fffd96d5ce0 | [
"Apache-2.0"
] | null | null | null | tencentcloud/ams/v20200608/models.py | xuzixx/tencentcloud-sdk-python | 98866ab9fd104cd6475b62fe78ff3fffd96d5ce0 | [
"Apache-2.0"
] | null | null | null | tencentcloud/ams/v20200608/models.py | xuzixx/tencentcloud-sdk-python | 98866ab9fd104cd6475b62fe78ff3fffd96d5ce0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AmsDetailInfo(AbstractModel):
    """Data item in the machine-moderation (machine review) detail list.

    """

    def __init__(self):
        """
        :param Label: Labels hit by the moderation.
        :type Label: list of str
        :param Duration: Duration in seconds.
        :type Duration: int
        :param Name: Task name.
        :type Name: str
        :param TaskID: Task ID; the TaskId field returned when the task was created.
        :type TaskID: str
        :param InsertTime: Insertion time.
        :type InsertTime: str
        :param DataForm: Data source: 0 means machine moderation; any other value means self-service (manual) moderation.
        :type DataForm: int
        :param Operator: Operator.
        :type Operator: str
        :param OriginalLabel: Originally hit labels.
        :type OriginalLabel: list of str
        :param OperateTime: Operation time.
        :type OperateTime: str
        :param Url: Original video URL.
        :type Url: str
        :param Thumbnail: Cover (thumbnail) image URL.
        :type Thumbnail: str
        :param Content: Short-audio content.
        :type Content: str
        :param DetailCount: Number of short audio clips.
        :type DetailCount: int
        :param RequestId: Request ID of the audio moderation call.
        :type RequestId: str
        :param Status: Machine-moderation status of the audio.
        :type Status: str
        """
        self.Label = None
        self.Duration = None
        self.Name = None
        self.TaskID = None
        self.InsertTime = None
        self.DataForm = None
        self.Operator = None
        self.OriginalLabel = None
        self.OperateTime = None
        self.Url = None
        self.Thumbnail = None
        self.Content = None
        self.DetailCount = None
        self.RequestId = None
        self.Status = None


    def _deserialize(self, params):
        self.Label = params.get("Label")
        self.Duration = params.get("Duration")
        self.Name = params.get("Name")
        self.TaskID = params.get("TaskID")
        self.InsertTime = params.get("InsertTime")
        self.DataForm = params.get("DataForm")
        self.Operator = params.get("Operator")
        self.OriginalLabel = params.get("OriginalLabel")
        self.OperateTime = params.get("OperateTime")
        self.Url = params.get("Url")
        self.Thumbnail = params.get("Thumbnail")
        self.Content = params.get("Content")
        self.DetailCount = params.get("DetailCount")
        self.RequestId = params.get("RequestId")
        self.Status = params.get("Status")
        # Warn about any response fields this SDK version doesn't model yet
        # (generated pattern; the misspelled names/message are kept verbatim).
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class AudioResult(AbstractModel):
    """Audio output parameters.

    """

    def __init__(self):
        """
        :param HitFlag: Whether the audio hit a rule:
0 - no hit
1 - hit
Note: this field may return null, indicating that no valid value can be obtained.
        :type HitFlag: int
        :param Label: Malicious label. Normal: normal, Porn: pornography, Abuse: verbal abuse, Ad: advertising, Custom: custom keyword library;
plus offensive, unsafe, or inappropriate content types.
Note: this field may return null, indicating that no valid value can be obtained.
        :type Label: str
        :param Suggestion: Suggested action to take based on the result.
Values: Block - block it, Review - review it manually, Pass - let it pass.
Note: this field may return null, indicating that no valid value can be obtained.
        :type Suggestion: str
        :param Score: Score, 0-100.
Note: this field may return null, indicating that no valid value can be obtained.
        :type Score: int
        :param Text: ASR text of the audio.
Note: this field may return null, indicating that no valid value can be obtained.
        :type Text: str
        :param Url: Storage URL of the audio segment, valid for 1 day.
Note: this field may return null, indicating that no valid value can be obtained.
        :type Url: str
        :param Duration: Audio duration.
        :type Duration: str
        :param Extra: Extended field.
        :type Extra: str
        :param TextResults: Text recognition results.
        :type TextResults: list of AudioResultDetailTextResult
        :param MoanResults: Audio moaning detection results.
        :type MoanResults: list of AudioResultDetailMoanResult
        :param LanguageResults: Audio language detection results.
        :type LanguageResults: list of AudioResultDetailLanguageResult
        """
        self.HitFlag = None
        self.Label = None
        self.Suggestion = None
        self.Score = None
        self.Text = None
        self.Url = None
        self.Duration = None
        self.Extra = None
        self.TextResults = None
        self.MoanResults = None
        self.LanguageResults = None


    def _deserialize(self, params):
        self.HitFlag = params.get("HitFlag")
        self.Label = params.get("Label")
        self.Suggestion = params.get("Suggestion")
        self.Score = params.get("Score")
        self.Text = params.get("Text")
        self.Url = params.get("Url")
        self.Duration = params.get("Duration")
        self.Extra = params.get("Extra")
        if params.get("TextResults") is not None:
            self.TextResults = []
            for item in params.get("TextResults"):
                obj = AudioResultDetailTextResult()
                obj._deserialize(item)
                self.TextResults.append(obj)
        if params.get("MoanResults") is not None:
            self.MoanResults = []
            for item in params.get("MoanResults"):
                obj = AudioResultDetailMoanResult()
                obj._deserialize(item)
                self.MoanResults.append(obj)
        if params.get("LanguageResults") is not None:
            self.LanguageResults = []
            for item in params.get("LanguageResults"):
                obj = AudioResultDetailLanguageResult()
                obj._deserialize(item)
                self.LanguageResults.append(obj)
        # Warn about any response fields this SDK version doesn't model yet
        # (generated pattern; the misspelled names/message are kept verbatim).
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class AudioResultDetailLanguageResult(AbstractModel):
    """Audio minority-language detection result.

    """

    def __init__(self):
        """
        :param Label: Language information.
Note: this field may return null, indicating that no valid value can be obtained.
        :type Label: str
        :param Score: Score.
Note: this field may return null, indicating that no valid value can be obtained.
        :type Score: int
        :param StartTime: Start time.
Note: this field may return null, indicating that no valid value can be obtained.
        :type StartTime: float
        :param EndTime: End time.
Note: this field may return null, indicating that no valid value can be obtained.
        :type EndTime: float
        :param SubLabelCode: Sub-label code.
Note: this field may return null, indicating that no valid value can be obtained.
        :type SubLabelCode: str
        """
        self.Label = None
        self.Score = None
        self.StartTime = None
        self.EndTime = None
        self.SubLabelCode = None


    def _deserialize(self, params):
        self.Label = params.get("Label")
        self.Score = params.get("Score")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.SubLabelCode = params.get("SubLabelCode")
        # Warn about any response fields this SDK version doesn't model yet
        # (generated pattern; the misspelled names/message are kept verbatim).
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class AudioResultDetailMoanResult(AbstractModel):
    """Audio moaning moderation result.

    """

    def __init__(self):
        """
        :param Label: Fixed to "Moan" (moaning).
Note: this field may return null, indicating that no valid value can be obtained.
        :type Label: str
        :param Score: Score.
        :type Score: int
        :param StartTime: Start time.
        :type StartTime: float
        :param EndTime: End time.
        :type EndTime: float
        :param SubLabelCode: Sub-label code.
        :type SubLabelCode: str
        """
        self.Label = None
        self.Score = None
        self.StartTime = None
        self.EndTime = None
        self.SubLabelCode = None


    def _deserialize(self, params):
        self.Label = params.get("Label")
        self.Score = params.get("Score")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.SubLabelCode = params.get("SubLabelCode")
        # Warn about any response fields this SDK version doesn't model yet
        # (generated pattern; the misspelled names/message are kept verbatim).
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class AudioResultDetailTextResult(AbstractModel):
    """Audio ASR text moderation result.

    """

    def __init__(self):
        """
        :param Label: Label. May be null when no valid value is obtained.
        :type Label: str
        :param Keywords: Matched keywords. May be null when no valid value is obtained.
        :type Keywords: list of str
        :param LibId: ID of the matched keyword library. May be null when no valid value is obtained.
        :type LibId: str
        :param LibName: Name of the matched keyword library. May be null when no valid value is obtained.
        :type LibName: str
        :param Score: Confidence score. May be null when no valid value is obtained.
        :type Score: int
        :param Suggestion: Moderation suggestion. May be null when no valid value is obtained.
        :type Suggestion: str
        :param LibType: Library type: 1 block/allow library, 2 custom library.
        :type LibType: int
        """
        self.Label = None
        self.Keywords = None
        self.LibId = None
        self.LibName = None
        self.Score = None
        self.Suggestion = None
        self.LibType = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.Label = params.get("Label")
        self.Keywords = params.get("Keywords")
        self.LibId = params.get("LibId")
        self.LibName = params.get("LibName")
        self.Score = params.get("Score")
        self.Suggestion = params.get("Suggestion")
        self.LibType = params.get("LibType")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class AudioSegments(AbstractModel):
    """Audio segment information.

    """

    def __init__(self):
        """
        :param OffsetTime: Frame-capture time.
            VOD file: offset from the start of the video, in seconds (e.g. 0, 5, 10).
            Live stream: a timestamp (e.g. 1594650717).
            May be null when no valid value is obtained.
        :type OffsetTime: str
        :param Result: Result set. May be null when no valid value is obtained.
        :type Result: :class:`tencentcloud.ams.v20200608.models.AudioResult`
        """
        self.OffsetTime = None
        self.Result = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.OffsetTime = params.get("OffsetTime")
        if params.get("Result") is not None:
            self.Result = AudioResult()
            self.Result._deserialize(params.get("Result"))
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class BucketInfo(AbstractModel):
    """Storage bucket information.

    See the Tencent Cloud COS documentation:
    https://cloud.tencent.com/document/product/436/44352
    """

    def __init__(self):
        """
        :param Bucket: Tencent Cloud Object Storage bucket name.
        :type Bucket: str
        :param Region: Region.
        :type Region: str
        :param Object: Object key.
        :type Object: str
        """
        self.Bucket = None
        self.Region = None
        self.Object = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.Bucket = params.get("Bucket")
        self.Region = params.get("Region")
        self.Object = params.get("Object")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class CancelTaskRequest(AbstractModel):
    """CancelTask request structure.

    """

    def __init__(self):
        """
        :param TaskId: Task ID.
        :type TaskId: str
        """
        self.TaskId = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.TaskId = params.get("TaskId")
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class CancelTaskResponse(AbstractModel):
    """CancelTask response structure.

    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request.
            Provide it when locating a problem with a request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from a response dict."""
        self.RequestId = params.get("RequestId")
class CreateAudioModerationTaskRequest(AbstractModel):
    """CreateAudioModerationTask request structure.

    """

    def __init__(self):
        """
        :param BizType: Business type, which selects the template policy and
            output/storage configuration. If you have no BizType yet, create
            one with the CreateBizConfig API first.
        :type BizType: str
        :param Type: Moderation type: AUDIO (VOD audio) or LIVE_AUDIO (live audio).
        :type Type: str
        :param Seed: Callback signature key; see the signature documentation.
        :type Seed: str
        :param CallbackUrl: Callback URL for moderation results. If set,
            violating audio segments and frame captures produced during
            moderation are sent to this endpoint.
        :type CallbackUrl: str
        :param Tasks: Input task information; at most 10 tasks per request.
        :type Tasks: list of TaskInput
        """
        self.BizType = None
        self.Type = None
        self.Seed = None
        self.CallbackUrl = None
        self.Tasks = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.BizType = params.get("BizType")
        self.Type = params.get("Type")
        self.Seed = params.get("Seed")
        self.CallbackUrl = params.get("CallbackUrl")
        if params.get("Tasks") is not None:
            self.Tasks = []
            for item in params.get("Tasks"):
                obj = TaskInput()
                obj._deserialize(item)
                self.Tasks.append(obj)
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class CreateAudioModerationTaskResponse(AbstractModel):
    """CreateAudioModerationTask response structure.

    """

    def __init__(self):
        """
        :param Results: Task creation results. May be null when no valid value is obtained.
        :type Results: list of TaskResult
        :param RequestId: Unique request ID, returned with every request.
            Provide it when locating a problem with a request.
        :type RequestId: str
        """
        self.Results = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from a response dict."""
        raw_results = params.get("Results")
        if raw_results is not None:
            self.Results = []
            for entry in raw_results:
                result = TaskResult()
                result._deserialize(entry)
                self.Results.append(result)
        self.RequestId = params.get("RequestId")
class CreateBizConfigRequest(AbstractModel):
    """CreateBizConfig request structure.

    """

    def __init__(self):
        """
        :param BizType: Business type; letters, digits and underscores only,
            at most 8 characters.
        :type BizType: str
        :param MediaModeration: Configuration information.
        :type MediaModeration: :class:`tencentcloud.ams.v20200608.models.MediaModerationConfig`
        :param BizName: Business name identifying the scenario, at most 32 characters.
        :type BizName: str
        :param ModerationCategories: Content to moderate. Options: Polity
            (politics); Porn; Illegal; Abuse; Terror; Ad.
        :type ModerationCategories: list of str
        """
        self.BizType = None
        self.MediaModeration = None
        self.BizName = None
        self.ModerationCategories = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.BizType = params.get("BizType")
        if params.get("MediaModeration") is not None:
            self.MediaModeration = MediaModerationConfig()
            self.MediaModeration._deserialize(params.get("MediaModeration"))
        self.BizName = params.get("BizName")
        self.ModerationCategories = params.get("ModerationCategories")
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class CreateBizConfigResponse(AbstractModel):
    """CreateBizConfig response structure.

    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request.
            Provide it when locating a problem with a request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from a response dict."""
        self.RequestId = params.get("RequestId")
class DescribeAmsListRequest(AbstractModel):
    """DescribeAmsList request structure.

    """

    def __init__(self):
        """
        :param PageToken: Page token.
        :type PageToken: str
        :param Limit: Page size limit.
        :type Limit: int
        :param PageDirection: Paging direction.
        :type PageDirection: str
        :param Filters: Filter conditions.
        :type Filters: list of Filter
        """
        self.PageToken = None
        self.Limit = None
        self.PageDirection = None
        self.Filters = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.PageToken = params.get("PageToken")
        self.Limit = params.get("Limit")
        self.PageDirection = params.get("PageDirection")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class DescribeAmsListResponse(AbstractModel):
    """DescribeAmsList response structure.

    """

    def __init__(self):
        """
        :param AmsDetailSet: Returned list data (optional; not yet publicly available).
        :type AmsDetailSet: list of AmsDetailInfo
        :param Total: Total number of entries.
        :type Total: int
        :param PageToken: Paging token.
        :type PageToken: str
        :param RequestId: Unique request ID, returned with every request.
            Provide it when locating a problem with a request.
        :type RequestId: str
        """
        self.AmsDetailSet = None
        self.Total = None
        self.PageToken = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from a response dict."""
        raw_details = params.get("AmsDetailSet")
        if raw_details is not None:
            self.AmsDetailSet = []
            for entry in raw_details:
                detail = AmsDetailInfo()
                detail._deserialize(entry)
                self.AmsDetailSet.append(detail)
        self.Total = params.get("Total")
        self.PageToken = params.get("PageToken")
        self.RequestId = params.get("RequestId")
class DescribeAudioStatRequest(AbstractModel):
    """DescribeAudioStat request structure.

    """

    def __init__(self):
        """
        :param AuditType: Moderation type: 1 machine moderation; 2 human moderation.
        :type AuditType: int
        :param Filters: Query conditions.
        :type Filters: list of Filters
        """
        self.AuditType = None
        self.Filters = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.AuditType = params.get("AuditType")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filters()
                obj._deserialize(item)
                self.Filters.append(obj)
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class DescribeAudioStatResponse(AbstractModel):
    """DescribeAudioStat response structure.

    """

    def __init__(self):
        """
        :param Overview: Recognition result statistics.
        :type Overview: :class:`tencentcloud.ams.v20200608.models.Overview`
        :param TrendCount: Recognition volume statistics.
        :type TrendCount: list of TrendCount
        :param EvilCount: Distribution of rule-violating data.
        :type EvilCount: list of EvilCount
        :param RequestId: Unique request ID, returned with every request.
            Provide it when locating a problem with a request.
        :type RequestId: str
        """
        self.Overview = None
        self.TrendCount = None
        self.EvilCount = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from a response dict."""
        raw_overview = params.get("Overview")
        if raw_overview is not None:
            self.Overview = Overview()
            self.Overview._deserialize(raw_overview)
        raw_trend = params.get("TrendCount")
        if raw_trend is not None:
            self.TrendCount = []
            for entry in raw_trend:
                trend = TrendCount()
                trend._deserialize(entry)
                self.TrendCount.append(trend)
        raw_evil = params.get("EvilCount")
        if raw_evil is not None:
            self.EvilCount = []
            for entry in raw_evil:
                evil = EvilCount()
                evil._deserialize(entry)
                self.EvilCount.append(evil)
        self.RequestId = params.get("RequestId")
class DescribeBizConfigRequest(AbstractModel):
    """DescribeBizConfig request structure.

    """

    def __init__(self):
        """
        :param BizType: Moderation business type.
        :type BizType: str
        """
        self.BizType = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.BizType = params.get("BizType")
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class DescribeBizConfigResponse(AbstractModel):
    """DescribeBizConfig response structure.

    """

    def __init__(self):
        """
        :param BizType: Business type.
        :type BizType: str
        :param BizName: Business name. May be null when no valid value is obtained.
        :type BizName: str
        :param ModerationCategories: Moderation scope.
        :type ModerationCategories: list of str
        :param MediaModeration: Media moderation configuration.
            May be null when no valid value is obtained.
        :type MediaModeration: :class:`tencentcloud.ams.v20200608.models.MediaModerationConfig`
        :param CreatedAt: Creation time.
        :type CreatedAt: str
        :param UpdatedAt: Update time.
        :type UpdatedAt: str
        :param RequestId: Unique request ID, returned with every request.
            Provide it when locating a problem with a request.
        :type RequestId: str
        """
        self.BizType = None
        self.BizName = None
        self.ModerationCategories = None
        self.MediaModeration = None
        self.CreatedAt = None
        self.UpdatedAt = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from a response dict."""
        self.BizType = params.get("BizType")
        self.BizName = params.get("BizName")
        self.ModerationCategories = params.get("ModerationCategories")
        raw_moderation = params.get("MediaModeration")
        if raw_moderation is not None:
            self.MediaModeration = MediaModerationConfig()
            self.MediaModeration._deserialize(raw_moderation)
        self.CreatedAt = params.get("CreatedAt")
        self.UpdatedAt = params.get("UpdatedAt")
        self.RequestId = params.get("RequestId")
class DescribeTaskDetailRequest(AbstractModel):
    """DescribeTaskDetail request structure.

    """

    def __init__(self):
        """
        :param TaskId: Task ID, the TaskId returned when the task was created.
        :type TaskId: str
        :param ShowAllSegments: Whether to show all segments; by default only
            segments that matched a rule are shown.
        :type ShowAllSegments: bool
        """
        self.TaskId = None
        self.ShowAllSegments = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.TaskId = params.get("TaskId")
        self.ShowAllSegments = params.get("ShowAllSegments")
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class DescribeTaskDetailResponse(AbstractModel):
    """DescribeTaskDetail response structure.

    """

    def __init__(self):
        """
        :param TaskId: Task ID. May be null when no valid value is obtained.
        :type TaskId: str
        :param DataId: Data ID passed in at moderation time.
            May be null when no valid value is obtained.
        :type DataId: str
        :param BizType: Business type used to select the recognition policy
            template (feature not released yet).
            May be null when no valid value is obtained.
        :type BizType: str
        :param Name: Task name. May be null when no valid value is obtained.
        :type Name: str
        :param Status: Status of the content moderation task. Values:
            FINISH completed; PENDING waiting; RUNNING in progress;
            ERROR failed; CANCELLED cancelled.
            May be null when no valid value is obtained.
        :type Status: str
        :param Type: Task type: AUDIO (VOD audio) or LIVE_AUDIO (live audio).
            May be null when no valid value is obtained.
        :type Type: str
        :param Suggestion: Suggested action for the violation level:
            Pass; Reveiw (review suggested); Block.
            May be null when no valid value is obtained.
        :type Suggestion: str
        :param Labels: Violation-type judgement, e.g. Label:Porn.
            May be null when no valid value is obtained.
        :type Labels: list of TaskLabel
        :param MediaInfo: Decoding information of the input media.
            May be null when no valid value is obtained.
        :type MediaInfo: :class:`tencentcloud.ams.v20200608.models.MediaInfo`
        :param InputInfo: Information about the moderation task input.
            May be null when no valid value is obtained.
        :type InputInfo: :class:`tencentcloud.ams.v20200608.models.InputInfo`
        :param CreatedAt: Task creation time. May be null when no valid value is obtained.
        :type CreatedAt: str
        :param UpdatedAt: Task update time. May be null when no valid value is obtained.
        :type UpdatedAt: str
        :param TryInSeconds: Retry after N seconds.
            May be null when no valid value is obtained.
        :type TryInSeconds: int
        :param AudioSegments: Audio results of video/audio moderation.
            May be null when no valid value is obtained.
        :type AudioSegments: list of AudioSegments
        :param ImageSegments: Image results of video moderation.
            May be null when no valid value is obtained.
        :type ImageSegments: list of ImageSegments
        :param AudioText: Full recognized audio text.
            May be null when no valid value is obtained.
        :type AudioText: str
        :param RequestId: Unique request ID, returned with every request.
            Provide it when locating a problem with a request.
        :type RequestId: str
        """
        self.TaskId = None
        self.DataId = None
        self.BizType = None
        self.Name = None
        self.Status = None
        self.Type = None
        self.Suggestion = None
        self.Labels = None
        self.MediaInfo = None
        self.InputInfo = None
        self.CreatedAt = None
        self.UpdatedAt = None
        self.TryInSeconds = None
        self.AudioSegments = None
        self.ImageSegments = None
        self.AudioText = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from a response dict."""
        self.TaskId = params.get("TaskId")
        self.DataId = params.get("DataId")
        self.BizType = params.get("BizType")
        self.Name = params.get("Name")
        self.Status = params.get("Status")
        self.Type = params.get("Type")
        self.Suggestion = params.get("Suggestion")
        raw_labels = params.get("Labels")
        if raw_labels is not None:
            self.Labels = []
            for entry in raw_labels:
                label = TaskLabel()
                label._deserialize(entry)
                self.Labels.append(label)
        raw_media = params.get("MediaInfo")
        if raw_media is not None:
            self.MediaInfo = MediaInfo()
            self.MediaInfo._deserialize(raw_media)
        raw_input = params.get("InputInfo")
        if raw_input is not None:
            self.InputInfo = InputInfo()
            self.InputInfo._deserialize(raw_input)
        self.CreatedAt = params.get("CreatedAt")
        self.UpdatedAt = params.get("UpdatedAt")
        self.TryInSeconds = params.get("TryInSeconds")
        raw_audio = params.get("AudioSegments")
        if raw_audio is not None:
            self.AudioSegments = []
            for entry in raw_audio:
                segment = AudioSegments()
                segment._deserialize(entry)
                self.AudioSegments.append(segment)
        raw_images = params.get("ImageSegments")
        if raw_images is not None:
            self.ImageSegments = []
            for entry in raw_images:
                segment = ImageSegments()
                segment._deserialize(entry)
                self.ImageSegments.append(segment)
        self.AudioText = params.get("AudioText")
        self.RequestId = params.get("RequestId")
class EvilCount(AbstractModel):
    """Distribution of rule-violating data.

    """

    def __init__(self):
        """
        :param EvilType: Violation type (optional; not yet publicly available).
        :type EvilType: str
        :param Count: Total count for this distribution type.
        :type Count: int
        """
        self.EvilType = None
        self.Count = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.EvilType = params.get("EvilType")
        self.Count = params.get("Count")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class FileOutput(AbstractModel):
    """COS file output location.

    """

    def __init__(self):
        """
        :param Bucket: Destination COS bucket.
        :type Bucket: str
        :param Region: COS region.
        :type Region: str
        :param ObjectPrefix: Object key prefix.
        :type ObjectPrefix: str
        """
        self.Bucket = None
        self.Region = None
        self.ObjectPrefix = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.Bucket = params.get("Bucket")
        self.Region = params.get("Region")
        self.ObjectPrefix = params.get("ObjectPrefix")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class Filter(AbstractModel):
    """Key-value filter for conditional queries, e.g. by ID, name or status.

    """

    def __init__(self):
        """
        :param Name: Name of the filter key.
        :type Name: str
        :param Values: One or more filter values.
        :type Values: list of str
        """
        self.Name = None
        self.Values = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.Name = params.get("Name")
        self.Values = params.get("Values")
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class Filters(AbstractModel):
    """Audio filter conditions.

    """

    def __init__(self):
        """
        :param Name: Query field. One of:
            BizType (policy), SubUin (sub-account), DateRange (date range).
        :type Name: str
        :param Values: Query values.
        :type Values: list of str
        """
        self.Name = None
        self.Values = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.Name = params.get("Name")
        self.Values = params.get("Values")
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class ImageResult(AbstractModel):
    """Result detail for an image.

    """

    def __init__(self):
        """
        :param HitFlag: Violation flag: 0 not hit, 1 hit.
            May be null when no valid value is obtained.
        :type HitFlag: int
        :param Suggestion: Suggested action on the judgement:
            Block, Review, or Pass.
        :type Suggestion: str
        :param Label: Malicious label. Normal; Porn; Abuse; Ad; Custom
            (custom keyword library); plus other offensive, unsafe or
            inappropriate content types.
            May be null when no valid value is obtained.
        :type Label: str
        :param Score: Confidence score.
        :type Score: int
        :param Results: Result set for captured frames.
        :type Results: list of ImageResultResult
        :param Url: Image URL.
        :type Url: str
        :param Extra: Extra field.
        :type Extra: str
        """
        self.HitFlag = None
        self.Suggestion = None
        self.Label = None
        self.Score = None
        self.Results = None
        self.Url = None
        self.Extra = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.HitFlag = params.get("HitFlag")
        self.Suggestion = params.get("Suggestion")
        self.Label = params.get("Label")
        self.Score = params.get("Score")
        if params.get("Results") is not None:
            self.Results = []
            for item in params.get("Results"):
                obj = ImageResultResult()
                obj._deserialize(item)
                self.Results.append(obj)
        self.Url = params.get("Url")
        self.Extra = params.get("Extra")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class ImageResultResult(AbstractModel):
    """Sub-result of an image output result.

    """

    def __init__(self):
        """
        :param Scene: Recognition scene, e.g. Porn, Sexy, Abuse, Ad
            and other scenes. May be null when no valid value is obtained.
        :type Scene: str
        :param HitFlag: Whether a rule was hit: 0 not hit, 1 hit.
            May be null when no valid value is obtained.
        :type HitFlag: int
        :param Suggestion: Suggested action on the judgement:
            Block, Review, or Pass.
            May be null when no valid value is obtained.
        :type Suggestion: str
        :param Label: Label. May be null when no valid value is obtained.
        :type Label: str
        :param SubLabel: Sub-label. May be null when no valid value is obtained.
        :type SubLabel: str
        :param Score: Score. May be null when no valid value is obtained.
        :type Score: int
        :param Names: List of person names when the hit scene is political;
            otherwise null.
        :type Names: list of str
        :param Text: OCR text of the image.
            May be null when no valid value is obtained.
        :type Text: str
        :param Details: Other details.
        :type Details: list of ImageResultsResultDetail
        """
        self.Scene = None
        self.HitFlag = None
        self.Suggestion = None
        self.Label = None
        self.SubLabel = None
        self.Score = None
        self.Names = None
        self.Text = None
        self.Details = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.Scene = params.get("Scene")
        self.HitFlag = params.get("HitFlag")
        self.Suggestion = params.get("Suggestion")
        self.Label = params.get("Label")
        self.SubLabel = params.get("SubLabel")
        self.Score = params.get("Score")
        self.Names = params.get("Names")
        self.Text = params.get("Text")
        if params.get("Details") is not None:
            self.Details = []
            for item in params.get("Details"):
                obj = ImageResultsResultDetail()
                obj._deserialize(item)
                self.Details.append(obj)
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class ImageResultsResultDetail(AbstractModel):
    """Image recognition result for a specific scene.

    """

    def __init__(self):
        """
        :param Location: Location information.
            May be null when no valid value is obtained.
        :type Location: list of ImageResultsResultDetailLocation
        :param Name: Task name. May be null when no valid value is obtained.
        :type Name: str
        :param Text: OCR-recognized text. May be null when no valid value is obtained.
        :type Text: str
        :param Label: Label.
        :type Label: str
        :param LibId: Library ID. May be null when no valid value is obtained.
        :type LibId: str
        :param LibName: Library name. May be null when no valid value is obtained.
        :type LibName: str
        :param Keywords: Matched keywords. May be null when no valid value is obtained.
        :type Keywords: list of str
        :param Suggestion: Suggestion. May be null when no valid value is obtained.
        :type Suggestion: str
        :param Score: Score. May be null when no valid value is obtained.
        :type Score: int
        :param SubLabelCode: Sub-label code. May be null when no valid value is obtained.
        :type SubLabelCode: str
        """
        self.Location = None
        self.Name = None
        self.Text = None
        self.Label = None
        self.LibId = None
        self.LibName = None
        self.Keywords = None
        self.Suggestion = None
        self.Score = None
        self.SubLabelCode = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        if params.get("Location") is not None:
            self.Location = []
            for item in params.get("Location"):
                obj = ImageResultsResultDetailLocation()
                obj._deserialize(item)
                self.Location.append(obj)
        self.Name = params.get("Name")
        self.Text = params.get("Text")
        self.Label = params.get("Label")
        self.LibId = params.get("LibId")
        self.LibName = params.get("LibName")
        self.Keywords = params.get("Keywords")
        self.Suggestion = params.get("Suggestion")
        self.Score = params.get("Score")
        self.SubLabelCode = params.get("SubLabelCode")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class ImageResultsResultDetailLocation(AbstractModel):
    """Location information in image details.

    """

    def __init__(self):
        """
        :param X: X coordinate. May be null when no valid value is obtained.
        :type X: float
        :param Y: Y coordinate. May be null when no valid value is obtained.
        :type Y: float
        :param Width: Width. May be null when no valid value is obtained.
        :type Width: int
        :param Height: Height. May be null when no valid value is obtained.
        :type Height: int
        :param Rotate: Rotation angle. May be null when no valid value is obtained.
        :type Rotate: float
        """
        self.X = None
        self.Y = None
        self.Width = None
        self.Height = None
        self.Rotate = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.X = params.get("X")
        self.Y = params.get("Y")
        self.Width = params.get("Width")
        self.Height = params.get("Height")
        self.Rotate = params.get("Rotate")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class ImageSegments(AbstractModel):
    """Image segment information.

    """

    def __init__(self):
        """
        :param Result: Detailed result for the captured frame.
        :type Result: :class:`tencentcloud.ams.v20200608.models.ImageResult`
        :param OffsetTime: Frame-capture time.
            VOD file: offset from the start of the video, in seconds (e.g. 0, 5, 10).
            Live stream: a timestamp (e.g. 1594650717).
        :type OffsetTime: str
        """
        self.Result = None
        self.OffsetTime = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        if params.get("Result") is not None:
            self.Result = ImageResult()
            self.Result._deserialize(params.get("Result"))
        self.OffsetTime = params.get("OffsetTime")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class InputInfo(AbstractModel):
    """Input information details.

    """

    def __init__(self):
        """
        :param Type: Input type: URL or COS.
            May be null when no valid value is obtained.
        :type Type: str
        :param Url: URL address. May be null when no valid value is obtained.
        :type Url: str
        :param BucketInfo: Bucket information; non-empty when the input type is COS.
            May be null when no valid value is obtained.
        :type BucketInfo: :class:`tencentcloud.ams.v20200608.models.BucketInfo`
        """
        self.Type = None
        self.Url = None
        self.BucketInfo = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.Type = params.get("Type")
        self.Url = params.get("Url")
        if params.get("BucketInfo") is not None:
            self.BucketInfo = BucketInfo()
            self.BucketInfo._deserialize(params.get("BucketInfo"))
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class MediaInfo(AbstractModel):
    """Media type information.

    """

    def __init__(self):
        """
        :param Codecs: Encoding format.
        :type Codecs: str
        :param Duration: Segment duration for stream detection.
            May be 0 when no valid value is obtained.
        :type Duration: int
        :param Width: Width, in pixels.
        :type Width: int
        :param Height: Height, in pixels.
        :type Height: int
        :param Thumbnail: Thumbnail.
        :type Thumbnail: str
        """
        self.Codecs = None
        self.Duration = None
        self.Width = None
        self.Height = None
        self.Thumbnail = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.Codecs = params.get("Codecs")
        self.Duration = params.get("Duration")
        self.Width = params.get("Width")
        self.Height = params.get("Height")
        self.Thumbnail = params.get("Thumbnail")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class MediaModerationConfig(AbstractModel):
    """Media moderation configuration.

    """

    def __init__(self):
        """
        :param AudioFrequency: Audio frame-capture frequency; defaults to one minute.
        :type AudioFrequency: int
        :param ImageFrequency: Image frame-capture frequency, in seconds per
            frame; defaults to 5, valid range 1-300.
        :type ImageFrequency: int
        :param CallbackUrl: Asynchronous callback URL.
        :type CallbackUrl: str
        :param SegmentOutput: Temporary file storage location.
        :type SegmentOutput: :class:`tencentcloud.ams.v20200608.models.FileOutput`
        :param UseOCR: Whether to use OCR; defaults to true.
        :type UseOCR: bool
        :param UseAudio: Whether to use audio (always true in audio scenarios).
        :type UseAudio: bool
        """
        self.AudioFrequency = None
        self.ImageFrequency = None
        self.CallbackUrl = None
        self.SegmentOutput = None
        self.UseOCR = None
        self.UseAudio = None

    def _deserialize(self, params):
        """Populate this model from a dict; warn on unrecognized keys."""
        self.AudioFrequency = params.get("AudioFrequency")
        self.ImageFrequency = params.get("ImageFrequency")
        self.CallbackUrl = params.get("CallbackUrl")
        if params.get("SegmentOutput") is not None:
            self.SegmentOutput = FileOutput()
            self.SegmentOutput._deserialize(params.get("SegmentOutput"))
        self.UseOCR = params.get("UseOCR")
        self.UseAudio = params.get("UseAudio")
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class Overview(AbstractModel):
    """Recognition result statistics.

    """

    def __init__(self):
        """
        :param TotalCount: Total number of calls.
        :type TotalCount: int
        :param TotalHour: Total call duration.
        :type TotalHour: int
        :param PassCount: Number of passed items.
        :type PassCount: int
        :param PassHour: Passed duration.
        :type PassHour: int
        :param EvilCount: Number of violations.
        :type EvilCount: int
        :param EvilHour: Violation duration.
        :type EvilHour: int
        :param SuspectCount: Number of suspected violations.
        :type SuspectCount: int
        :param SuspectHour: Suspected-violation duration.
        :type SuspectHour: int
        """
        self.TotalCount = None
        self.TotalHour = None
        self.PassCount = None
        self.PassHour = None
        self.EvilCount = None
        self.EvilHour = None
        self.SuspectCount = None
        self.SuspectHour = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        self.TotalHour = params.get("TotalHour")
        self.PassCount = params.get("PassCount")
        self.PassHour = params.get("PassHour")
        self.EvilCount = params.get("EvilCount")
        self.EvilHour = params.get("EvilHour")
        self.SuspectCount = params.get("SuspectCount")
        self.SuspectHour = params.get("SuspectHour")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class StorageInfo(AbstractModel):
    """Data storage information.

    """

    def __init__(self):
        """
        :param Type: Type. One of:
            URL  resource link;
            COS  Tencent Cloud Object Storage.
        :type Type: str
        :param Url: Resource link.
        :type Url: str
        :param BucketInfo: Tencent Cloud storage bucket information.
        :type BucketInfo: :class:`tencentcloud.ams.v20200608.models.BucketInfo`
        """
        self.Type = None
        self.Url = None
        self.BucketInfo = None

    def _deserialize(self, params):
        """Populate this model from a dict; warn on unrecognized keys."""
        self.Type = params.get("Type")
        self.Url = params.get("Url")
        if params.get("BucketInfo") is not None:
            self.BucketInfo = BucketInfo()
            self.BucketInfo._deserialize(params.get("BucketInfo"))
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class TaskInput(AbstractModel):
    """Audio/video task structure.

    """

    def __init__(self):
        """
        :param DataId: Data ID.
        :type DataId: str
        :param Name: Task name.
        :type Name: str
        :param Input: Task input.
        :type Input: :class:`tencentcloud.ams.v20200608.models.StorageInfo`
        """
        self.DataId = None
        self.Name = None
        self.Input = None

    def _deserialize(self, params):
        """Populate this model from a request dict; warn on unrecognized keys."""
        self.DataId = params.get("DataId")
        self.Name = params.get("Name")
        if params.get("Input") is not None:
            self.Input = StorageInfo()
            self.Input._deserialize(params.get("Input"))
        # Any key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class TaskLabel(AbstractModel):
    """Task output label.

    """

    def __init__(self):
        """
        :param Label: Malicious label. Normal; Porn; Abuse; Ad; Custom
            (custom keyword library); plus other offensive, unsafe or
            inappropriate content types.
            May be null when no valid value is obtained.
        :type Label: str
        :param Suggestion: Suggested action on the judgement:
            Block, Review, or Pass.
            May be null when no valid value is obtained.
        :type Suggestion: str
        :param Score: Score, in the range 0-100.
            May be null when no valid value is obtained.
        :type Score: int
        """
        self.Label = None
        self.Suggestion = None
        self.Score = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.Label = params.get("Label")
        self.Suggestion = params.get("Suggestion")
        self.Score = params.get("Score")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class TaskResult(AbstractModel):
    """Result returned when a task is created.

    """

    def __init__(self):
        """
        :param DataId: DataId passed in with the request.
            May be null when no valid value is obtained.
        :type DataId: str
        :param TaskId: Task ID. May be null when no valid value is obtained.
        :type TaskId: str
        :param Code: Error code. OK means creation succeeded; otherwise see
            the common error codes.
            May be null when no valid value is obtained.
        :type Code: str
        :param Message: Error details when creation failed.
            May be null when no valid value is obtained.
        :type Message: str
        """
        self.DataId = None
        self.TaskId = None
        self.Code = None
        self.Message = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.DataId = params.get("DataId")
        self.TaskId = params.get("TaskId")
        self.Code = params.get("Code")
        self.Message = params.get("Message")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
class TrendCount(AbstractModel):
    """Recognition volume statistics.

    """

    def __init__(self):
        """
        :param TotalCount: Total number of calls.
        :type TotalCount: int
        :param TotalHour: Total call duration.
        :type TotalHour: int
        :param PassCount: Number of passed items.
        :type PassCount: int
        :param PassHour: Passed duration.
        :type PassHour: int
        :param EvilCount: Number of violations.
        :type EvilCount: int
        :param EvilHour: Violation duration.
        :type EvilHour: int
        :param SuspectCount: Number of suspected violations.
        :type SuspectCount: int
        :param SuspectHour: Suspected-violation duration.
        :type SuspectHour: int
        :param Date: Date.
        :type Date: str
        """
        self.TotalCount = None
        self.TotalHour = None
        self.PassCount = None
        self.PassHour = None
        self.EvilCount = None
        self.EvilHour = None
        self.SuspectCount = None
        self.SuspectHour = None
        self.Date = None

    def _deserialize(self, params):
        """Populate this model from a response dict; warn on unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        self.TotalHour = params.get("TotalHour")
        self.PassCount = params.get("PassCount")
        self.PassHour = params.get("PassHour")
        self.EvilCount = params.get("EvilCount")
        self.EvilHour = params.get("EvilHour")
        self.SuspectCount = params.get("SuspectCount")
        self.SuspectHour = params.get("SuspectHour")
        self.Date = params.get("Date")
        # Any response key without a matching attribute is ignored; warn the caller.
        extra = set(params.keys()) - set(vars(self).keys())
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra))
| 29.321886 | 114 | 0.584834 |
6c1c57133d365929f33182aacc796a9de43188cb | 222 | py | Python | fib.py | GiorgiMatcharashvili/Solve-the-math-problem- | a7a97d0e784da16f110006d2e7997162c98af707 | [
"MIT"
] | null | null | null | fib.py | GiorgiMatcharashvili/Solve-the-math-problem- | a7a97d0e784da16f110006d2e7997162c98af707 | [
"MIT"
] | null | null | null | fib.py | GiorgiMatcharashvili/Solve-the-math-problem- | a7a97d0e784da16f110006d2e7997162c98af707 | [
"MIT"
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    FIX: the original used naive double recursion, which is O(2**n);
    this iterative version is O(n) and returns identical values.
    """
    if n < 2:
        return n
    prev, cur = 0, 1
    for _ in range(n - 1):
        prev, cur = cur, prev + cur
    return cur


if __name__ == "__main__":
    # FIX: guard the demo loop so importing this module does not hang.
    # Prints the Fibonacci sequence forever, separated by "; " as before.
    n = 0
    while True:
        print(fib(n), end="; ")
        n += 1
| 13.058824 | 39 | 0.36036 |
9226baf5341835fd2eae6b417d187f2a7a645e18 | 7,401 | py | Python | train.py | liuyao12/pytorch-cifar | dde5080c16d5a4c3d5861e547862761c2e661b95 | [
"MIT"
] | null | null | null | train.py | liuyao12/pytorch-cifar | dde5080c16d5a4c3d5861e547862761c2e661b95 | [
"MIT"
] | null | null | null | train.py | liuyao12/pytorch-cifar | dde5080c16d5a4c3d5861e547862761c2e661b95 | [
"MIT"
] | 1 | 2019-12-12T19:33:55.000Z | 2019-12-12T19:33:55.000Z | # the code mostly from https://github.com/sdoria/SimpleSelfAttention
#based on @grankin FastAI forum script
#updated by lessw2020 to use Mish XResNet
# adapted from https://github.com/fastai/fastai/blob/master/examples/train_imagenette.py
# changed per gpu bs for bs_rat
from fastai.script import *
from fastai.vision import *
from fastai.callbacks import *
from fastai.distributed import *
from fastprogress import fastprogress
from torchvision.models import *
#from fastai.vision.models.xresnet import *
#from fastai.vision.models.xresnet2 import *
#from fastai.vision.models.presnet import *
#from x2resnet import *
from mxresnet import *
from functools import partial
torch.backends.cudnn.benchmark = True
fastprogress.MAX_COLS = 80
def get_data(size, woof, bs, workers=None):
    """Build an ImageWoof/ImageNette databunch at the requested image size.

    Downloads (or reuses) the full-size dataset, applies horizontal-flip
    augmentation, and normalises with ImageNet statistics.
    """
    dataset_url = URLs.IMAGEWOOF if woof else URLs.IMAGENETTE
    path = untar_data(dataset_url)

    n_gpus = num_distrib() or 1
    if workers is None:
        workers = min(8, num_cpus() // n_gpus)

    item_list = ImageList.from_folder(path).split_by_folder(valid='val')
    return (item_list.label_from_folder()
            .transform(([flip_lr(p=0.5)], []), size=size)
            .databunch(bs=bs, num_workers=workers)
            .presize(size, scale=(0.35, 1))
            .normalize(imagenet_stats))
#from radam import *
#from novograd import *
#from rangervar import *
from ranger import *
#from ralamb import *
#from over9000 import *
#from lookahead import *
#from adams import *
#from rangernovo import *
#from rangerlars import *
def fit_with_annealing(learn:Learner, num_epoch:int, lr:float=defaults.lr, annealing_start:float=0.7)->None:
    """Train with a flat learning-rate phase followed by cosine annealing.

    The first `annealing_start` fraction of all iterations runs at a constant
    `lr`; the remainder anneals it with a cosine schedule.
    """
    batches_per_epoch = len(learn.data.train_dl)
    flat_iters = int(batches_per_epoch * num_epoch * annealing_start)
    anneal_iters = batches_per_epoch * num_epoch - flat_iters
    phases = [
        TrainingPhase(flat_iters).schedule_hp('lr', lr),
        TrainingPhase(anneal_iters).schedule_hp('lr', lr, anneal=annealing_cos),
    ]
    learn.callbacks.append(GeneralScheduler(learn, phases))
    learn.fit(num_epoch)
def _print_radii(model):
    """Print the mean of every parameter whose name contains 'radii'."""
    for name, param in model.named_parameters():
        if "radii" in name:
            print(name, param.mean().item())


def train(
        gpu:Param("GPU to run on", str)=None,
        woof: Param("Use imagewoof (otherwise imagenette)", int)=0,
        lr: Param("Learning rate", float)=1e-3,
        size: Param("Size (px: 128,192,224)", int)=128,
        alpha: Param("Alpha", float)=0.99,
        mom: Param("Momentum", float)=0.9,
        eps: Param("epsilon", float)=1e-6,
        epochs: Param("Number of epochs", int)=5,
        bs: Param("Batch size", int)=256,
        mixup: Param("Mixup", float)=0.,
        opt: Param("Optimizer (adam,rms,sgd)", str)='adam',
        arch: Param("Architecture (xresnet34, xresnet50)", str)='xresnet50',
        sa: Param("Self-attention", int)=0,
        sym: Param("Symmetry for self-attention", int)=0,
        dump: Param("Print model; don't train", int)=0,
        lrfinder: Param("Run learning rate finder; don't train", int)=0,
        log: Param("Log file name", str)='log',
        sched_type: Param("LR schedule type", str)='one_cycle',
        ann_start: Param("Mixup", float)=-1.0,
        ):
    """Distributed training of Imagenette; returns the final metric value."""
    bs_one_gpu = bs
    gpu = setup_distrib(gpu)
    if gpu is None: bs *= torch.cuda.device_count()
    # Build the optimizer factory lazily: only the selected branch is
    # evaluated, so optimizer classes whose imports are commented out at the
    # top of the file only raise if actually requested.
    if opt == 'adam': opt_func = partial(optim.Adam, betas=(mom, alpha), eps=eps)
    elif opt == 'radam': opt_func = partial(RAdam, betas=(mom, alpha), eps=eps)
    elif opt == 'novograd': opt_func = partial(Novograd, betas=(mom, alpha), eps=eps)
    elif opt == 'rms': opt_func = partial(optim.RMSprop, alpha=alpha, eps=eps)
    elif opt == 'sgd': opt_func = partial(optim.SGD, momentum=mom)
    elif opt == 'rangervar': opt_func = partial(RangerVar, betas=(mom, alpha), eps=eps)
    elif opt == 'ranger': opt_func = partial(Ranger, betas=(mom, alpha), eps=eps)
    elif opt == 'ralamb': opt_func = partial(Ralamb, betas=(mom, alpha), eps=eps)
    elif opt == 'over9000': opt_func = partial(Over9000, k=12, betas=(mom, alpha), eps=eps)
    elif opt == 'lookahead': opt_func = partial(LookaheadAdam, betas=(mom, alpha), eps=eps)
    elif opt == 'Adams': opt_func = partial(Adams)
    elif opt == 'rangernovo': opt_func = partial(RangerNovo)
    elif opt == 'rangerlars': opt_func = partial(RangerLars)
    else:
        # FIX: an unknown optimizer name used to leave `opt_func` unbound and
        # fail later with NameError; fail fast with a clear message instead.
        raise ValueError("Unknown optimizer: %s" % opt)

    data = get_data(size, woof, bs)
    bs_rat = bs / bs_one_gpu  # originally bs/256
    if gpu is not None: bs_rat *= max(num_distrib(), 1)
    if not gpu:
        print(f'lr: {lr}; eff_lr: {lr*bs_rat}; size: {size}; alpha: {alpha}; mom: {mom}; eps: {eps}')
    lr *= bs_rat  # scale LR by the effective batch-size ratio

    m = globals()[arch]  # look up the architecture constructor by name
    log_cb = partial(CSVLogger, filename=log)
    learn = (Learner(data, m(c_out=10, sa=sa, sym=sym), wd=1e-2,
                     opt_func=opt_func,
                     metrics=[accuracy, top_k_accuracy],
                     bn_wd=False, true_wd=True,
                     loss_func=LabelSmoothingCrossEntropy(),
                     callback_fns=[log_cb]))
    print(learn.path)

    n = len(learn.data.train_dl)
    ann_start2 = int(n * epochs * ann_start)
    print(ann_start2, " annealing start")

    if dump: print(learn.model); exit()
    if mixup: learn = learn.mixup(alpha=mixup)
    learn = learn.to_fp16(dynamic=True)

    if gpu is None: learn.to_parallel()
    elif num_distrib() > 1: learn.to_distributed(gpu)  # Requires `-m fastai.launch`

    # Log the 'radii' parameters before and after training (helper replaces
    # the two duplicated loops of the original).
    _print_radii(learn.model)

    if lrfinder:
        # Run the learning rate finder instead of training.
        learn.lr_find(wd=1e-2)
        learn.recorder.plot()
    else:
        if sched_type == 'one_cycle':
            learn.fit_one_cycle(epochs, lr, div_factor=10, pct_start=0.3)
        elif sched_type == 'flat_and_anneal':
            fit_with_annealing(learn, epochs, lr, ann_start)

    _print_radii(learn.model)
    return learn.recorder.metrics[-1][0]
@call_parse
def main(
        run: Param("Number of run", int)=5,
        gpu:Param("GPU to run on", str)=None,
        woof: Param("Use imagewoof (otherwise imagenette)", int)=0,
        lr: Param("Learning rate", float)=1e-3,
        size: Param("Size (px: 128,192,224)", int)=128,
        alpha: Param("Alpha", float)=0.99,
        mom: Param("Momentum", float)=0.9,
        eps: Param("epsilon", float)=1e-6,
        epochs: Param("Number of epochs", int)=5,
        bs: Param("Batch size", int)=256,
        mixup: Param("Mixup", float)=0.,
        opt: Param("Optimizer (adam,rms,sgd)", str)='adam',
        arch: Param("Architecture (mxresnet34, mxresnet50)", str)='mxresnet50',
        sa: Param("Self-attention", int)=0,
        sym: Param("Symmetry for self-attention", int)=0,
        dump: Param("Print model; don't train", int)=0,
        lrfinder: Param("Run learning rate finder; don't train", int)=0,
        log: Param("Log file name", str)='log',
        sched_type: Param("LR schedule type", str)='one_cycle',
        ann_start: Param("Mixup", float)=-1.0,
        ):
    """Run `train` `run` times and print the accuracies, their mean and std."""
    accuracies = []
    for _ in range(run):
        accuracies.append(
            train(gpu, woof, lr, size, alpha, mom, eps, epochs, bs, mixup,
                  opt, arch, sa, sym, dump, lrfinder, log, sched_type,
                  ann_start))
    acc = np.array(accuracies)
    print(acc)
    print(np.mean(acc))
    print(np.std(acc))
d6874f823d20e5a19275231dcd5d73e3345325b1 | 3,554 | py | Python | bindings/python/ensmallen/datasets/string/streptacidiphilusalbus.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-09-10T18:31:58.000Z | 2022-03-24T04:28:04.000Z | bindings/python/ensmallen/datasets/string/streptacidiphilusalbus.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/streptacidiphilusalbus.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Streptacidiphilus albus.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def StreptacidiphilusAlbus(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return a new instance of the Streptacidiphilus albus graph.

    The graph is automatically retrieved from the STRING repository
    (Szklarczyk et al., "STRING v11", Nucleic Acids Research, 2019).

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
    preprocess: bool = True
        Whether to preprocess the graph for optimal load time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during retrieval and building.
    cache: bool = True
        Whether to download and preprocess files only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve. Available versions:
        homology.v11.0, homology.v11.5, physical.links.v11.0,
        physical.links.v11.5, links.v11.0, links.v11.5.
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Streptacidiphilus albus graph.
    """
    retriever = AutomaticallyRetrievedGraph(
        graph_name="StreptacidiphilusAlbus",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
| 32.907407 | 223 | 0.678109 |
12fd60cfda59d765a711ea507a2b1701e684f05f | 165 | py | Python | tradercompany/activation_funcs.py | yyamaguchi/tradercompany | 42036f2fd8360f448e3a45fcf7a01331f7732fb8 | [
"Apache-2.0"
] | null | null | null | tradercompany/activation_funcs.py | yyamaguchi/tradercompany | 42036f2fd8360f448e3a45fcf7a01331f7732fb8 | [
"Apache-2.0"
] | 1 | 2021-11-19T14:51:46.000Z | 2021-11-19T14:51:46.000Z | tradercompany/activation_funcs.py | yoshida-chem/tradercompany | 42036f2fd8360f448e3a45fcf7a01331f7732fb8 | [
"Apache-2.0"
] | null | null | null | import numpy as np
def identity(x):
    """Identity activation: return the input unchanged."""
    return x
def tanh(x):
    """Hyperbolic tangent activation (element-wise for numpy arrays)."""
    return np.tanh(x)
def sign(x):
    """Step activation: 1.0 where x > 0, else 0.0 (note: sign(0) == 0)."""
    return 1.0 * (x > 0.0)
def ReLU(x):
    """Rectified linear unit: x where x > 0, else 0 (uses `sign` as a mask)."""
    return x * sign(x)
| 11 | 26 | 0.569697 |
5391a19f386bc3c29de0a22d32b020aa5b5530cb | 2,119 | py | Python | tests/ip/traceroute/test_ip_traceroute_01.py | mingchik/happy | 5d998f4aa01d375770fa57a23f819dcf9f434625 | [
"Apache-2.0"
] | null | null | null | tests/ip/traceroute/test_ip_traceroute_01.py | mingchik/happy | 5d998f4aa01d375770fa57a23f819dcf9f434625 | [
"Apache-2.0"
] | null | null | null | tests/ip/traceroute/test_ip_traceroute_01.py | mingchik/happy | 5d998f4aa01d375770fa57a23f819dcf9f434625 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Calls traceroute between nodes.
#
import os
import unittest
import happy.HappyStateLoad
import happy.HappyStateUnload
import happy.Traceroute
class test_ip_traceroute_01(unittest.TestCase):
    """Traceroute between two nodes on a three-node Thread/Weave topology."""

    def setUp(self):
        """Load the three-node topology before each test."""
        topology_dir = os.path.dirname(os.path.realpath(__file__))
        self.topology_file = topology_dir + \
            "/../../../topologies/three_nodes_on_thread_weave.json"

        load_opts = happy.HappyStateLoad.option()
        load_opts["quiet"] = True
        load_opts["json_file"] = self.topology_file
        happy.HappyStateLoad.HappyStateLoad(load_opts).run()

    def tearDown(self):
        """Unload the topology after each test."""
        unload_opts = happy.HappyStateUnload.option()
        unload_opts["quiet"] = True
        unload_opts["json_file"] = self.topology_file
        happy.HappyStateUnload.HappyStateUnload(unload_opts).run()

    def test_ip_traceroute(self):
        """Simple traceroute from node01 to node02; check the result value."""
        tr_opts = happy.Traceroute.option()
        tr_opts["quiet"] = False
        tr_opts["source"] = "node01"
        tr_opts["destination"] = "node02"

        traceroute = happy.Traceroute.Traceroute(tr_opts)
        ret = traceroute.run()

        value = ret.Value()
        data = ret.Data()
        self.assertTrue(value < 11, "%s < 11 %%" % (str(value)))
# Run this test case via the unittest CLI when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 29.430556 | 77 | 0.664936 |
1e973644f5e34a70b36b341434cc512f702e4d80 | 2,627 | py | Python | count_filing_arrivals.py | DataFinnovation/api-demos-python | 1b5cf3334c537b9a09bcb8973c030ad7f19dd2ba | [
"Apache-2.0"
] | 1 | 2019-10-04T18:20:43.000Z | 2019-10-04T18:20:43.000Z | count_filing_arrivals.py | DataFinnovation/api-demos-python | 1b5cf3334c537b9a09bcb8973c030ad7f19dd2ba | [
"Apache-2.0"
] | null | null | null | count_filing_arrivals.py | DataFinnovation/api-demos-python | 1b5cf3334c537b9a09bcb8973c030ad7f19dd2ba | [
"Apache-2.0"
] | null | null | null | """counts the number of filings for each month going back years"""
import calendar
import datetime
from oauth2_wrappers import gen_token
from df_wrappers import documents_dslquery
def main():
    """Count filings per month for one source and print a CSV to stdout.

    Iterates Jan 2010 .. Dec 2019 (skipping months that start in the
    future), queries the document search API for each month's date range,
    and prints `start date,end date,number filings` rows.
    """
    # Generate one auth token up front; we will send several queries with it.
    token = gen_token()
    # Which source to count; mainly US SEC and Japan EDINET have
    # large histories.
    source_to_count = "US SEC"
    # Standard Elasticsearch DSL: filter on the source and a filingtime
    # range. The range values are placeholders, overwritten per month below.
    dsl_dict = {
        "query": {
            "constant_score": {
                "filter": {
                    "bool": {
                        "must": [
                            {"term": {"filingsource": source_to_count}},
                            {"range": {"filingtime": {"gte": "2018-01-01",
                                                      "lte": "2018-01-31"}}}
                        ]
                    }
                }
            }
        }
    }
    # Pointer into the DSL so the loop can rewrite the date range in place.
    range_clause = dsl_dict["query"]["constant_score"]["filter"]["bool"]["must"][1]
    # Only one result is requested; we read just `totalHits` anyway.
    param_dict = {'maxresult': 1}

    # Print the CSV header.
    print(','.join(['start date', 'end date', 'number filings']))
    for year in range(2010, 2020):
        for month in range(1, 13):
            start_date = datetime.datetime(year, month, 1)
            # Don't bother with months that have not started yet.
            if start_date > datetime.datetime.utcnow():
                continue
            start_date_str = start_date.strftime("%Y-%m-%d")
            last_day = calendar.monthrange(year, month)[1]
            end_date_str = datetime.datetime(year, month,
                                             last_day).strftime("%Y-%m-%d")
            # FIX: replace the whole filingtime dict every iteration. The
            # original template used an "lt" key but the loop assigned
            # "lte", leaving a stale "lt": "2018-01-31" bound in every
            # query sent to Elasticsearch.
            range_clause["range"]["filingtime"] = {"gte": start_date_str,
                                                   "lte": end_date_str}
            # Send off the query and read the total match count.
            resp_data = documents_dslquery(dsl_dict, token=token,
                                           params=param_dict)
            num_hits = resp_data['totalHits']
            print(','.join([start_date_str, end_date_str, str(num_hits)]))


if __name__ == "__main__":
    # FIX: guard the entry point so importing this module does not fire
    # network queries as a side effect.
    main()
# eof
| 33.253165 | 84 | 0.553864 |
6501da01747441c1fd96b18b4528eb61b79f7d98 | 731 | py | Python | com/LimePencil/Q3602/iChess.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | com/LimePencil/Q3602/iChess.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | com/LimePencil/Q3602/iChess.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | # O(1)
import sys
# Rebind input() to a faster raw readline (competitive-programming idiom);
# note this shadows the builtin for the rest of the script.
input = sys.stdin.readline
# Read the two tile counts and sort so that n <= m.
n,m=sorted(map(int,input().split()))
if m==0:
    # Both counts are zero: no board can be built at all.
    print("Impossible")
else:
    if n==m:
        # Equal counts: side length is floor(sqrt(2*n)).
        print(int((n*2)**0.5))
    else:
        # The more plentiful colour allows one extra tile on an odd-sided
        # board: side length is floor(sqrt(2*n + 1)).
        print(int((n*2+1)**0.5))
# bruteforce
# import sys
# input = sys.stdin.readline
# n,m=sorted(map(int,input().split()))
# if m==0:
# print("Impossible")
# else:
# ans=0
# for i in range(1,142):
# total_tiles=i**2
# if i%2==0:
# w_tiles = total_tiles//2
# b_tiles = total_tiles//2
# else:
# w_tiles = total_tiles//2
# b_tiles = total_tiles//2+1
# if w_tiles <= n and b_tiles <= m:
# ans=i
# else:
# break
# print(ans) | 19.756757 | 43 | 0.491108 |
490a91f4d26630bcec17daef03361bf6f22f198e | 135 | py | Python | challenges/2.2.Strings/main.py | pradeepsaiu/python-coding-challenges | b435ab650d85de267eeaa31a55ff77ef5dbff86b | [
"BSD-3-Clause"
] | 141 | 2017-05-07T00:38:22.000Z | 2022-03-25T10:14:25.000Z | challenges/2.2.Strings/main.py | pradeepsaiu/python-coding-challenges | b435ab650d85de267eeaa31a55ff77ef5dbff86b | [
"BSD-3-Clause"
] | 23 | 2017-05-06T23:57:37.000Z | 2018-03-23T19:07:32.000Z | challenges/2.2.Strings/main.py | pradeepsaiu/python-coding-challenges | b435ab650d85de267eeaa31a55ff77ef5dbff86b | [
"BSD-3-Clause"
] | 143 | 2017-05-07T09:33:35.000Z | 2022-03-12T21:04:13.000Z | ### Modify the code below ###
# Challenge stub: replace each `null` placeholder with a real value.
# NOTE(review): `null` is not defined in Python, so running this file
# unmodified raises NameError -- that appears intentional for the exercise
# (Python's equivalent of null is None).
myName = null
myAge = null
favoriteActivity = null
mySentence = null
### Modify the code above ###
| 11.25 | 29 | 0.659259 |
c7181af34b73767e68fee9171d8e94dd0c0c2b63 | 2,552 | py | Python | detect.py | ReesaJohn/yolov3-tf2 | b87d321e609b17c446bd94a777be33d0eb2e3806 | [
"MIT"
] | null | null | null | detect.py | ReesaJohn/yolov3-tf2 | b87d321e609b17c446bd94a777be33d0eb2e3806 | [
"MIT"
] | null | null | null | detect.py | ReesaJohn/yolov3-tf2 | b87d321e609b17c446bd94a777be33d0eb2e3806 | [
"MIT"
] | null | null | null | import time
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import numpy as np
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset
from yolov3_tf2.utils import draw_outputs
# Command-line flags for single-image YOLOv3 inference.
flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './checkpoints/yolov3.tf',
                    'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('image', './data/girl.png', 'path to input image')
flags.DEFINE_string('tfrecord', None, 'tfrecord instead of image')
flags.DEFINE_string('output', './output.jpg', 'path to output image')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
def main(_argv):
    """Run single-image YOLOv3 detection and write an annotated image.

    Loads the (tiny or full) model and weights named by the command-line
    flags, reads one image (from --image, or a sample from --tfrecord),
    runs detection, logs the boxes, and saves the drawn result to --output.
    """
    # Don't let TensorFlow grab all GPU memory up front.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)

    if FLAGS.tiny:
        yolo = YoloV3Tiny(classes=FLAGS.num_classes)
    else:
        yolo = YoloV3(classes=FLAGS.num_classes)

    yolo.load_weights(FLAGS.weights).expect_partial()
    logging.info('weights loaded')

    # FIX: close the class-names file (the original leaked the handle).
    with open(FLAGS.classes) as names_file:
        class_names = [c.strip() for c in names_file.readlines()]
    logging.info('classes loaded')

    if FLAGS.tfrecord:
        dataset = load_tfrecord_dataset(
            FLAGS.tfrecord, FLAGS.classes, FLAGS.size)
        dataset = dataset.shuffle(512)
        img_raw, _label = next(iter(dataset.take(1)))
    else:
        # FIX: close the image file as well.
        with open(FLAGS.image, 'rb') as image_file:
            img_raw = tf.image.decode_image(image_file.read(), channels=3)

    img = tf.expand_dims(img_raw, 0)
    img = transform_images(img, FLAGS.size)

    t1 = time.time()
    boxes, scores, classes, nums = yolo(img)
    t2 = time.time()
    logging.info('time: {}'.format(t2 - t1))

    logging.info('detections:')
    for i in range(nums[0]):
        logging.info('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                           np.array(scores[0][i]),
                                           np.array(boxes[0][i])))

    img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
    img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
    cv2.imwrite(FLAGS.output, img)
    logging.info('output saved to: {}'.format(FLAGS.output))
if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        # absl's app.run() terminates via SystemExit; swallow it so the
        # script can also be invoked from an interactive session.
        pass
| 33.578947 | 75 | 0.660658 |
b7004de9349015c5744923538414d1e804f8501d | 952 | py | Python | session01_Decorators/ex05.py | morales-gregorio/Python-Module-of-the-Week | 2c68e20be3e174be9b91c92ac872806dd982e7d2 | [
"MIT"
] | 15 | 2017-06-22T11:57:38.000Z | 2022-03-31T13:34:07.000Z | session01_Decorators/ex05.py | morales-gregorio/Python-Module-of-the-Week | 2c68e20be3e174be9b91c92ac872806dd982e7d2 | [
"MIT"
] | 3 | 2019-10-16T10:32:55.000Z | 2020-01-09T09:24:48.000Z | session01_Decorators/ex05.py | morales-gregorio/Python-Module-of-the-Week | 2c68e20be3e174be9b91c92ac872806dd982e7d2 | [
"MIT"
] | 6 | 2016-10-07T12:50:24.000Z | 2019-11-28T11:15:04.000Z | # -*- coding: utf-8 -*-
"""
Exercise: listize decorator
When a function returns a list of results, we might need
to gather those results in a list:
def lucky_numbers(n):
ans = []
for i in range(n):
if i % 7 != 0:
continue
if sum(int(digit) for digit in str(i)) % 3 != 0:
continue
ans.append(i)
return ans
This looks much nicer when written as a generator.
① Convert lucky_numbers to be a generator.
② Write a 'listize' decorator which gathers the results from a
generator and returns a list and use it to wrap the new lucky_numbers().
Subexercise: ③ Write an 'arrayize' decorator which returns the results
in a numpy array instead of a list.
>>> @listize
... def f():
... yield 1
... yield 2
>>> f()
[1, 2]
"""
import functools
def listize(func):
    """Decorator that collects a generator function's yielded values into a list."""
    @functools.wraps(func)
    def gather(*args, **kwargs):
        return list(func(*args, **kwargs))
    return gather
| 22.666667 | 72 | 0.644958 |
1c6282d74585d1341e5bfa7be3e6ecc049191314 | 3,997 | py | Python | cipher_program/encode.py | patrickleweryharris/Enigma | 2e12066f39185889fae79f9c9d844bc67a035355 | [
"MIT"
] | 1 | 2015-12-24T04:20:38.000Z | 2015-12-24T04:20:38.000Z | cipher_program/encode.py | patrickleweryharris/Enigma | 2e12066f39185889fae79f9c9d844bc67a035355 | [
"MIT"
] | null | null | null | cipher_program/encode.py | patrickleweryharris/Enigma | 2e12066f39185889fae79f9c9d844bc67a035355 | [
"MIT"
] | null | null | null | # Functions for encoding a message with an enigma machine comprised of n rotors
# Could be combined with decode.py using instanced variables
from enigma import Enigma
def _process_messages(msg):
    """Sanitise the message: uppercase it and keep letters only.

    @type msg: str
    @rtype: str
    """
    return ''.join(ch for ch in msg.upper() if ch.isalpha())
def _create_ascii_encoding(msg):
    """Encode an uppercase-letter string as 0-25 ordinals ('A' == 0).

    @type msg: str
    @rtype: [int]
    """
    return [ord(ch) - 65 for ch in msg]
def rotor(machine, message, rotor_num, ring_num):
    """Apply one rotor stage of the enigma machine to `message`, in place.

    Each ordinal is shifted by the rotor's current position; the position
    advances after every character and wraps like a physical rotor. The
    next rotor's setting is stepped per character, preserving the original
    (per-character) stepping behaviour.

    @type machine: Enigma
    @type message: [int]  (ordinals 0-25; modified in place)
    @type rotor_num: int  (0, 1 or 2)
    @type ring_num: int   (currently unused; the ring setting is hard-coded)
    @rtype: None
    """
    rotor_pos = machine.rotor_settings[rotor_num]
    starting_pos = rotor_pos
    for i, char in enumerate(message):
        # FIX 1: write back in place. The original appended to a new list
        # and then rebound the *local* name (`message = returned_str`), so
        # the rotor stage never reached the caller.
        # FIX 2: reduce modulo 26 so ciphertext ordinals stay in 0-25; the
        # original could exceed 25, which return_to_string turned into
        # non-letter characters.
        message[i] = (char + rotor_pos) % 26
        rotor_pos += 1
        if rotor_pos == 27:
            rotor_pos = 1
        if rotor_pos - 26 == starting_pos:  # hard-coded ring setting
            rotor_pos = starting_pos        # makes the rotor circular
        # Step the next rotor; rotors 0 and 1 have a successor, rotor 2
        # does not (equivalent to the original _get_next_rotor check).
        if rotor_num < 2:
            machine.rotor_settings[rotor_num + 1] += 1
def _ring(rotor_setting, ring_num):
    """Return 1 when the rotor sits at its ring position, else 0.

    Ring number 0 is special-cased to never fire (intended for the first
    rotor). NOTE kept from the original: this helper is currently unused;
    the ring behaviour is hard-coded inside `rotor`.

    @type rotor_setting: int
    @type ring_num: int
    @rtype: int
    """
    if ring_num == 0:
        # Special condition: ring 0 never signals.
        return 0
    return 1 if rotor_setting == ring_num else 0
# FIXME Ring function is no longer needed. Has been hardcode in line 49
def _get_next_rotor(rotor_num):
    """Map a rotor index to the rotor that steps after it (others wrap to 0).

    @type rotor_num: int
    @rtype: int
    """
    successor = {0: 1, 1: 2}
    return successor.get(rotor_num, 0)
def plugs(machine, message):
    """Apply the machine's plugboard substitutions to `message`, in place.

    Each plug is a [src, dst] pair; any ordinal equal to `src` becomes
    `dst`. Later plugs see the already-substituted value, so chained plug
    pairs apply cumulatively (matching the original behaviour).

    NOTE(review): a physical Enigma plugboard swaps letters symmetrically;
    this mapping is one-directional -- confirm against decode.py.

    @type machine: Enigma
    @type message: [int]
    @rtype: None
    """
    # FIX: removed the leftover debug `print(message)` from the original.
    for i in range(len(message)):
        for src, dst in machine.plug_settings:
            if message[i] == src:
                message[i] = dst
def return_to_string(msg):
    """Convert a list of 0-25 ordinals back into an uppercase string.

    @type msg: [int]
    @rtype: str
    """
    return ''.join(chr(code + 65) for code in msg)
# Main encryption function ----------------------------------------------------
def encipher(orig, machine):
    """Return the encrypted form of `orig` using `machine`.

    Pipeline: sanitise the text, encode it as 0-25 ordinals, run three
    rotor passes, apply the plugboard, and convert back to a string.

    @type orig: str
    @type machine: Enigma
    @rtype: str
    """
    ordinals = _create_ascii_encoding(_process_messages(orig))
    # Three rotor passes with hard-coded ring numbers 0, 1 and 6.
    # NOTE kept from the original: one rotor shouldn't have a ring -- see
    # the special case in `_ring` when the ring number is 0.
    rotor(machine, ordinals, 0, 0)
    rotor(machine, ordinals, 1, 1)
    rotor(machine, ordinals, 2, 6)
    plugs(machine, ordinals)
    return return_to_string(ordinals)
# This program is currently hardcoded to a three rotor enigma variant,
# though it is compleatly extensible
if __name__ == "__main__":
    # Demo run. NOTE(review): presumably the three Enigma arguments are the
    # rotor settings, ring settings and plug pairs -- confirm in enigma.py.
    machine = Enigma([4,5 , 6], [0], [[15, 14], [12, 11], [8, 20], [10, 9], [13, 7], [19, 24], [6, 1], [21, 5], [17, 4], [3, 2]])
    message = 'test'
    print(encipher(message, machine))
| 25.787097 | 129 | 0.621966 |
eecf3c5aeb6c6785cae3fd5808954a73db6190d6 | 15,936 | py | Python | tensorflow/contrib/boosted_trees/estimator_batch/model.py | Sonata-Wang/tensorflow | 8bbef0cd77879d05ed69bf30e76087847a8ca4a2 | [
"Apache-2.0"
] | 36 | 2016-12-17T15:25:25.000Z | 2022-01-29T21:50:53.000Z | tensorflow/contrib/boosted_trees/estimator_batch/model.py | shekharpalit/tensorflow | 6aa83398ab03bfae822f36772757097bcb98b6ed | [
"Apache-2.0"
] | 30 | 2016-10-04T15:38:08.000Z | 2020-07-16T12:09:33.000Z | tensorflow/contrib/boosted_trees/estimator_batch/model.py | shekharpalit/tensorflow | 6aa83398ab03bfae822f36772757097bcb98b6ed | [
"Apache-2.0"
] | 36 | 2017-07-27T21:12:40.000Z | 2022-02-03T16:45:56.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GTFlow Model definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib import learn
from tensorflow.contrib.boosted_trees.estimator_batch import estimator_utils
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_util
class ModelBuilderOutputType(object):
  """Enum-like constants selecting what the model builders return."""

  # Return tf.contrib.learn ModelFnOps (the legacy interface).
  MODEL_FN_OPS = 0
  # Return a tf.estimator EstimatorSpec (the newer interface).
  ESTIMATOR_SPEC = 1
def model_builder(features,
                  labels,
                  mode,
                  params,
                  config,
                  output_type=ModelBuilderOutputType.MODEL_FN_OPS):
  """Multi-machine batch gradient descent tree model.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * learner_config: A config for the learner.
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * examples_per_layer: Number of examples to accumulate before growing a
        layer. It can also be a function that computes the number of examples
        based on the depth of the layer that's being built.
      * weight_column_name: The name of weight column.
      * center_bias: Whether a separate tree should be created for first fitting
        the bias.
      * override_global_step_value: If after the training is done, global step
        value must be reset to this value. This is particularly useful for hyper
        parameter tuning, which can't recognize early stopping due to the number
        of trees. If None, no override of global step will happen.
    config: `RunConfig` of the estimator.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).

  Returns:
    A `ModelFnOps` object (or an `EstimatorSpec`, per `output_type`).

  Raises:
    ValueError: if inputs are not valid, or `output_type` is unknown.
  """
  head = params["head"]
  learner_config = params["learner_config"]
  examples_per_layer = params["examples_per_layer"]
  feature_columns = params["feature_columns"]
  weight_column_name = params["weight_column_name"]
  num_trees = params["num_trees"]
  use_core_libs = params["use_core_libs"]
  logits_modifier_function = params["logits_modifier_function"]
  output_leaf_index = params["output_leaf_index"]
  override_global_step_value = params.get("override_global_step_value", None)
  num_quantiles = params["num_quantiles"]

  if features is None:
    raise ValueError("At least one feature must be specified.")

  if config is None:
    raise ValueError("Missing estimator RunConfig.")
  # Allow soft placement so ops without a GPU kernel fall back to CPU.
  if config.session_config is not None:
    session_config = config.session_config
    session_config.allow_soft_placement = True
  else:
    session_config = config_pb2.ConfigProto(allow_soft_placement=True)
  config = config.replace(session_config=session_config)

  center_bias = params["center_bias"]

  if isinstance(features, ops.Tensor):
    features = {features.name: features}

  # Make a shallow copy of features to ensure downstream usage
  # is unaffected by modifications in the model function.
  training_features = copy.copy(features)
  training_features.pop(weight_column_name, None)
  global_step = training_util.get_global_step()
  # Keep the ensemble variable on the same device as the global step.
  with ops.device(global_step.device):
    ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config="",  # Initialize an empty ensemble.
        name="ensemble_model")

  # Create GBDT model.
  gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
      is_chief=config.is_chief,
      num_ps_replicas=config.num_ps_replicas,
      ensemble_handle=ensemble_handle,
      center_bias=center_bias,
      examples_per_layer=examples_per_layer,
      learner_config=learner_config,
      feature_columns=feature_columns,
      logits_dimension=head.logits_dimension,
      features=training_features,
      use_core_columns=use_core_libs,
      output_leaf_index=output_leaf_index,
      num_quantiles=num_quantiles)
  with ops.name_scope("gbdt", "gbdt_optimizer"):
    predictions_dict = gbdt_model.predict(mode)
    logits = predictions_dict["predictions"]
    if logits_modifier_function:
      logits = logits_modifier_function(logits, features, mode)

    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      update_op = gbdt_model.train(loss, predictions_dict, labels)
      with ops.control_dependencies(
          [update_op]), (ops.colocate_with(global_step)):
        update_op = state_ops.assign_add(global_step, 1).op
        return update_op

  create_estimator_spec_op = getattr(head, "create_estimator_spec", None)

  training_hooks = []
  if num_trees:
    if center_bias:
      # The bias tree counts toward the total.
      num_trees += 1

    finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
    training_hooks.append(
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees,
                                      override_global_step_value))

  if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
    if use_core_libs and callable(create_estimator_spec_op):
      model_fn_ops = head.create_estimator_spec(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_train_op_fn,
          logits=logits)
      model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
          model_fn_ops)
    else:
      model_fn_ops = head.create_model_fn_ops(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_train_op_fn,
          logits=logits)

    if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
      model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
          gbdt_batch.LEAF_INDEX]

    model_fn_ops.training_hooks.extend(training_hooks)
    return model_fn_ops
  elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
    assert callable(create_estimator_spec_op)
    estimator_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)

    estimator_spec = estimator_spec._replace(
        training_hooks=training_hooks + list(estimator_spec.training_hooks))
    return estimator_spec

  # FIX: the original fell through to `return model_fn_ops`, which was
  # unreachable for valid output types and raised NameError for invalid
  # ones; fail loudly with a clear message instead.
  raise ValueError("Unknown output_type: %s" % (output_type,))
def ranking_model_builder(features,
                          labels,
                          mode,
                          params,
                          config,
                          output_type=ModelBuilderOutputType.MODEL_FN_OPS):
  """Multi-machine batch gradient descent tree model for ranking.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * learner_config: A config for the learner.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * examples_per_layer: Number of examples to accumulate before growing a
          layer. It can also be a function that computes the number of examples
          based on the depth of the layer that's being built.
      * weight_column_name: The name of weight column.
      * center_bias: Whether a separate tree should be created for first fitting
          the bias.
      * ranking_model_pair_keys (Optional): Keys to distinguish between features
          for left and right part of the training pairs for ranking. For example,
          for an Example with features "a.f1" and "b.f1", the keys would be
          ("a", "b").
      * override_global_step_value: If after the training is done, global step
          value must be reset to this value. This is particularly useful for hyper
          parameter tuning, which can't recognize early stopping due to the number
          of trees. If None, no override of global step will happen.
    config: `RunConfig` of the estimator.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).

  Returns:
    A `ModelFnOps` object (or an `EstimatorSpec` when
    output_type == ModelBuilderOutputType.ESTIMATOR_SPEC).

  Raises:
    ValueError: if inputs are not valid, or output_type is unknown.
  """
  head = params["head"]
  learner_config = params["learner_config"]
  examples_per_layer = params["examples_per_layer"]
  feature_columns = params["feature_columns"]
  weight_column_name = params["weight_column_name"]
  num_trees = params["num_trees"]
  use_core_libs = params["use_core_libs"]
  logits_modifier_function = params["logits_modifier_function"]
  output_leaf_index = params["output_leaf_index"]
  ranking_model_pair_keys = params["ranking_model_pair_keys"]
  override_global_step_value = params.get("override_global_step_value", None)
  num_quantiles = params["num_quantiles"]

  if features is None:
    raise ValueError("At least one feature must be specified.")

  if config is None:
    raise ValueError("Missing estimator RunConfig.")

  center_bias = params["center_bias"]

  if isinstance(features, ops.Tensor):
    features = {features.name: features}

  # Make a shallow copy of features to ensure downstream usage
  # is unaffected by modifications in the model function.
  training_features = copy.copy(features)
  training_features.pop(weight_column_name, None)
  global_step = training_util.get_global_step()
  with ops.device(global_step.device):
    ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config="",  # Initialize an empty ensemble.
        name="ensemble_model")

  # Extract the features.
  if mode == learn.ModeKeys.TRAIN or mode == learn.ModeKeys.EVAL:
    # For ranking pairwise training, we extract two sets of features.
    if len(ranking_model_pair_keys) != 2:
      raise ValueError("You must provide keys for ranking.")
    left_pair_key = ranking_model_pair_keys[0]
    right_pair_key = ranking_model_pair_keys[1]
    if left_pair_key is None or right_pair_key is None:
      raise ValueError("Both pair keys should be provided for ranking.")

    features_1 = {}
    features_2 = {}
    for name in training_features:
      feature = training_features[name]
      if name.startswith(left_pair_key + "."):
        # Strip the "<pair key>." prefix. The previous implementation used
        # name[2:], which silently corrupted feature names whenever a pair
        # key was longer than a single character; stripping by the actual
        # prefix length is identical for 1-char keys and correct otherwise.
        features_1[name[len(left_pair_key) + 1:]] = feature
      else:
        assert name.startswith(right_pair_key + ".")
        features_2[name[len(right_pair_key) + 1:]] = feature

    main_features = features_1
    supplementary_features = features_2
  else:
    # For non-ranking or inference ranking, we have only 1 set of features.
    main_features = training_features

  # Create GBDT model.
  gbdt_model_main = gbdt_batch.GradientBoostedDecisionTreeModel(
      is_chief=config.is_chief,
      num_ps_replicas=config.num_ps_replicas,
      ensemble_handle=ensemble_handle,
      center_bias=center_bias,
      examples_per_layer=examples_per_layer,
      learner_config=learner_config,
      feature_columns=feature_columns,
      logits_dimension=head.logits_dimension,
      features=main_features,
      use_core_columns=use_core_libs,
      output_leaf_index=output_leaf_index,
      num_quantiles=num_quantiles)

  with ops.name_scope("gbdt", "gbdt_optimizer"):
    # Logits for inference.
    if mode == learn.ModeKeys.INFER:
      predictions_dict = gbdt_model_main.predict(mode)
      logits = predictions_dict[gbdt_batch.PREDICTIONS]
      if logits_modifier_function:
        logits = logits_modifier_function(logits, features, mode)
    else:
      # Both models share the same ensemble handle: the supplementary model
      # scores the "right" half of each pair with the same trees.
      gbdt_model_supplementary = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=config.is_chief,
          num_ps_replicas=config.num_ps_replicas,
          ensemble_handle=ensemble_handle,
          center_bias=center_bias,
          examples_per_layer=examples_per_layer,
          learner_config=learner_config,
          feature_columns=feature_columns,
          logits_dimension=head.logits_dimension,
          features=supplementary_features,
          use_core_columns=use_core_libs,
          output_leaf_index=output_leaf_index)

      # Logits for train and eval.
      if not supplementary_features:
        raise ValueError("Features for ranking must be specified.")

      predictions_dict_1 = gbdt_model_main.predict(mode)
      predictions_1 = predictions_dict_1[gbdt_batch.PREDICTIONS]

      predictions_dict_2 = gbdt_model_supplementary.predict(mode)
      predictions_2 = predictions_dict_2[gbdt_batch.PREDICTIONS]

      # Pairwise ranking loss is driven by the score difference.
      logits = predictions_1 - predictions_2
      if logits_modifier_function:
        logits = logits_modifier_function(logits, features, mode)

      predictions_dict = predictions_dict_1
      predictions_dict[gbdt_batch.PREDICTIONS] = logits

  def _train_op_fn(loss):
    """Returns the op to optimize the loss."""
    update_op = gbdt_model_main.train(loss, predictions_dict, labels)
    with ops.control_dependencies(
        [update_op]), (ops.colocate_with(global_step)):
      update_op = state_ops.assign_add(global_step, 1).op
      return update_op

  create_estimator_spec_op = getattr(head, "create_estimator_spec", None)

  training_hooks = []
  if num_trees:
    if center_bias:
      # The bias is fit as an extra (first) tree.
      num_trees += 1

    finalized_trees, attempted_trees = (
        gbdt_model_main.get_number_of_trees_tensor())

    training_hooks.append(
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees,
                                      override_global_step_value))

  if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
    if use_core_libs and callable(create_estimator_spec_op):
      model_fn_ops = head.create_estimator_spec(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_train_op_fn,
          logits=logits)
      model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
          model_fn_ops)
    else:
      model_fn_ops = head.create_model_fn_ops(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_train_op_fn,
          logits=logits)

    if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
      model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
          gbdt_batch.LEAF_INDEX]

    model_fn_ops.training_hooks.extend(training_hooks)
    return model_fn_ops
  elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
    assert callable(create_estimator_spec_op)
    estimator_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)

    estimator_spec = estimator_spec._replace(
        training_hooks=training_hooks + list(estimator_spec.training_hooks))
    return estimator_spec

  # The original code fell through to `return model_fn_ops` here, which
  # raised a confusing NameError for any unknown output type.
  raise ValueError("Unknown output type: %s" % output_type)
| 38.492754 | 81 | 0.712663 |
ebed145a9420170dd97caa01e8dd194f2645c886 | 2,434 | py | Python | SMSProject/venv/Lib/site-packages/scripts/checker_commons.py | LourencoFernando/SMS-Project | f8e13dafdb41aa01f79337819cc3033a532410e8 | [
"MIT"
] | null | null | null | SMSProject/venv/Lib/site-packages/scripts/checker_commons.py | LourencoFernando/SMS-Project | f8e13dafdb41aa01f79337819cc3033a532410e8 | [
"MIT"
] | null | null | null | SMSProject/venv/Lib/site-packages/scripts/checker_commons.py | LourencoFernando/SMS-Project | f8e13dafdb41aa01f79337819cc3033a532410e8 | [
"MIT"
] | null | null | null | import json, sys
from collections import defaultdict
def aggregate(pdf_filepath, report, aggregated_report_filepath):
    """Merge one PDF's check report into the aggregated JSON report file.

    Args:
        pdf_filepath: path of the PDF this report refers to; appended to the
            list of files under each failure/error key.
        report: per-PDF report dict. Recognized keys: "version" (popped and
            stored verbatim), "failure" (single fatal-failure string), and
            "errors" (list of error strings; only read when "failure" absent).
        aggregated_report_filepath: JSON file updated in place; created on
            first use.
    """
    agg_report = {
        "failures": defaultdict(list),
        "errors": defaultdict(list),
    }
    try:
        with open(aggregated_report_filepath) as agg_file:
            prev_agg_report = json.load(agg_file)
        # .get(...) so a legacy or partially-written aggregate file that is
        # missing one of the keys does not abort aggregation with a KeyError.
        agg_report["failures"].update(prev_agg_report.get("failures", {}))
        agg_report["errors"].update(prev_agg_report.get("errors", {}))
    except FileNotFoundError:
        print("Initializing a new JSON file for the aggregated report")
    if "version" in report:
        agg_report["version"] = report.pop("version")
    if "failure" in report:
        failure = report["failure"]
        agg_report["failures"][failure].append(pdf_filepath)
    else:
        for error in report.get("errors", []):
            agg_report["errors"][error].append(pdf_filepath)
    with open(aggregated_report_filepath, "w") as agg_file:
        json.dump(agg_report, agg_file)
def print_aggregated_report(
    aggregated_report_filepath, checks_details_url, ignore_whitelist_filepath
):
    """Pretty-print the aggregated report, then fail on non-whitelisted issues.

    Failures are listed as-is; errors are listed most-frequent first. Finally
    delegates to fail_on_unexpected_check_failure, which may exit(1).
    """
    with open(aggregated_report_filepath) as agg_file:
        agg_report = json.load(agg_file)
    if "version" in agg_report:
        print(agg_report["version"])
    print("Documentation on the checks:", checks_details_url)
    print("# AGGREGATED REPORT #")
    failures = agg_report["failures"]
    if failures:
        print("Failures:")
        for name, paths in failures.items():
            print(f"- {name} ({len(paths)}): {', '.join(paths)}")
    print("Errors:")
    # Sort errors by descending number of affected PDFs.
    by_count_desc = sorted(agg_report["errors"].items(), key=lambda item: -len(item[1]))
    for name, paths in by_count_desc:
        print(f"- {name} ({len(paths)}): {', '.join(paths)}")
    fail_on_unexpected_check_failure(agg_report, ignore_whitelist_filepath)
def fail_on_unexpected_check_failure(agg_report, ignore_whitelist_filepath):
    """exit(1) if there is any non-passing & non-whitelisted error remaining."""
    with open(ignore_whitelist_filepath) as ignore_file:
        whitelist = json.load(ignore_file)
    # Errors present in the report but absent from the whitelist.
    unexpected_errors = set(agg_report["errors"]) - set(whitelist["errors"])
    if not (agg_report["failures"] or unexpected_errors):
        return
    issue_names = sorted(agg_report["failures"].keys()) + sorted(unexpected_errors)
    print(
        "Non-whitelisted issues found:",
        ", ".join(issue_names),
    )
    sys.exit(1)
| 40.566667 | 84 | 0.671323 |
208474ee6c69382d29064848b6bde0a9440dab93 | 5,107 | py | Python | bib2xyz.py | Ps2Fino/mendeley2csv | 0e3473340c06d5cadcfec3e80747417b78041f65 | [
"BSD-3-Clause"
] | 1 | 2022-02-10T15:21:20.000Z | 2022-02-10T15:21:20.000Z | bib2xyz.py | Ps2Fino/mendeley2csv | 0e3473340c06d5cadcfec3e80747417b78041f65 | [
"BSD-3-Clause"
] | null | null | null | bib2xyz.py | Ps2Fino/mendeley2csv | 0e3473340c06d5cadcfec3e80747417b78041f65 | [
"BSD-3-Clause"
] | null | null | null | ##
## Extracts information of interest from
## Mendeley exported bib files
##
## Note: this program expects input CSV files
## to be complete, with a header row.
## Execution behaviour is undefined without one.
##
## @author Daniel J. Finnegan
## @date February 2019
import argparse
import os
import sys
from mendproc.bibmanager import BibManager
from mendproc import Parsers
cli_help="""
Processes Mendeley bibliographic entries. Can also optionally export the loaded
file into a different format.
"""
## Use this to dump the keywords from the input file
def dump_bib_keywords (manager, output_dir_path):
    """Write the manager's (lower-cased) keywords to keywords.txt, one per line.

    The file goes into output_dir_path when given, otherwise into the
    current working directory.
    """
    target = 'keywords.txt'
    if output_dir_path is not None:
        target = os.path.join (output_dir_path, 'keywords.txt')
    keywords = manager.dump_keywords (lowercase=True)
    with open (target, 'w', encoding='utf8') as fp:
        fp.writelines (keyword + '\n' for keyword in keywords)
    print ('Dumped keywords to', target)
def dump_bib_authors (manager, output_dir_path):
    """Write the manager's author list to authors.txt, one author per line.

    The file goes into output_dir_path when given, otherwise into the
    current working directory.
    """
    target = 'authors.txt'
    if output_dir_path is not None:
        target = os.path.join (output_dir_path, 'authors.txt')
    authors = manager.dump_authors ()
    with open (target, 'w', encoding='utf8') as fp:
        fp.writelines (author + '\n' for author in authors)
    print ('Dumped author list to', target)
def process_args (bibmanager, arguments, output_dir_path):
    """Apply the parsed CLI options in *arguments* to *bibmanager*.

    Bug fix: the body previously read the module-level global ``args``
    instead of the ``arguments`` parameter, so the function only worked by
    accident when called from this script's __main__ section.

    Returns the (possibly filtered) bibmanager.
    """
    # Drop entries whose keywords don't match the regex, if one was given.
    if arguments.pattern != '' and arguments.pattern is not None:
        bibmanager.cutoff_keywords_regex (arguments.pattern)
    # Drop entries older than the cutoff year, if one was given.
    if arguments.cutoff_year != '' and arguments.cutoff_year is not None:
        bibmanager.cutoff_year (int(arguments.cutoff_year))
    if arguments.dump_keywords:
        dump_bib_keywords (bibmanager, output_dir_path)
    if arguments.dump_authors:
        dump_bib_authors (bibmanager, output_dir_path)
    return bibmanager
def main (args, input_file_path):
    """Load the input bib file, apply the CLI options, optionally export.

    Args:
        args: parsed argparse namespace (input/output formats, output dir,
            filtering options, save file).
        input_file_path: absolute path of the file to load entries from.
    """
    if args.output_dir:
        output_dir_path = os.path.abspath (args.output_dir)
        # makedirs + exist_ok also handles nested output paths, which the
        # previous os.mkdir could not create, and avoids the race between
        # the isdir check and the creation.
        os.makedirs (output_dir_path, exist_ok=True)
    else:
        output_dir_path = None

    # File I/O
    with open (input_file_path, 'r', encoding='utf8') as in_file:
        in_lines = in_file.readlines ()

    manager = BibManager ()
    manager.lines2entries (in_lines, data_type=args.input_format) # Load into ADT

    ## Process
    manager = process_args (manager, args, output_dir_path)

    ## Save the file
    if args.save_file is not None:
        if output_dir_path is not None:
            output_file_path = os.path.join (output_dir_path, args.save_file)
        else:
            output_file_path = os.path.abspath (args.save_file)
        print ('Saving to', output_file_path)
        manager.entries2lines (data_type=args.output_format) # Export to desired DT
        out_lines = manager.lines
        with open (output_file_path, 'w', encoding='utf8') as out_file:
            for line in out_lines:
                out_file.write (line + '\n')
if __name__ == '__main__':
    # Build the CLI. Argument defaults mean most options are optional.
    parser = argparse.ArgumentParser (description=cli_help)
    parser.add_argument (dest='input_file', help='The file to load bib entries from. See README for implemented formats')
    input_format_group = parser.add_argument_group (title='Input formats')
    input_format_group.add_argument ('--input-format', dest='input_format', action='store', help='Input file format', default='bibtex')
    input_format_group.add_argument ('--output-format', dest='output_format', action='store', help='Output file format', default='csv')
    input_format_group.add_argument ('--output-dir', dest='output_dir', action='store', help='Output', default='output')
    command_group = parser.add_argument_group (title='Commands')
    command_group.add_argument ('--dump-keywords', dest='dump_keywords', action='store_true', help='Dump the entry keywords to a file', default=True)
    command_group.add_argument ('--dump-authors', dest='dump_authors', action='store_true', help='Dump the entry authors to a file', default=True)
    command_group.add_argument ('--output-file', dest='save_file', action='store', help='The file to export bib entries to. If a file exists, it will be silently overwritten', default='output.csv')
    command_group.add_argument ('--cutoff-year', dest='cutoff_year', action='store', help='Ignore entries older than year specified')
    command_group.add_argument ('--keyword-regex', dest='pattern', action='store', help='Ignore entries that don\'t match')
    args = parser.parse_args ()

    ## Check dependencies
    if args.save_file and not args.output_format:
        print ('You must specify an output format when saving a file. Aborting...')
        # Exit non-zero so shell callers can detect the failure (the
        # previous bare sys.exit() reported success with status 0).
        sys.exit (1)

    ## Load the file
    input_file_path = os.path.abspath (args.input_file)
    if not os.path.exists (input_file_path) or os.path.isdir (input_file_path):
        print ('Input file doesn\'t exist or is a directory. Aborting...')
        sys.exit (1)

    ## Arguments have been parsed. Now call the program
    main (args, input_file_path)
6d96500eb6bb82fa5a168f0a7a0c9631f51d60ce | 16,607 | py | Python | demo/predictor.py | ZhongYingMatrix/maskrcnn-benchmark | 6238aff4414dedecf3d02a97c4f39c2e4cf8d35b | [
"MIT"
] | null | null | null | demo/predictor.py | ZhongYingMatrix/maskrcnn-benchmark | 6238aff4414dedecf3d02a97c4f39c2e4cf8d35b | [
"MIT"
] | null | null | null | demo/predictor.py | ZhongYingMatrix/maskrcnn-benchmark | 6238aff4414dedecf3d02a97c4f39c2e4cf8d35b | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.utils import cv2_util
class Resize(object):
    """Resize a PIL image so its shorter side equals min_size, capped so the
    longer side never exceeds max_size."""

    def __init__(self, min_size, max_size):
        self.min_size = min_size
        self.max_size = max_size

    # modified from torchvision to add support for max size
    def get_size(self, image_size):
        """Return the target (height, width) for an input (width, height)."""
        width, height = image_size
        target = self.min_size
        cap = self.max_size
        if cap is not None:
            shorter = float(min(width, height))
            longer = float(max(width, height))
            # Shrink the target when scaling the shorter side to `target`
            # would push the longer side past the cap.
            if longer / shorter * target > cap:
                target = int(round(cap * shorter / longer))

        # Already at the target size along the shorter side: no-op.
        if (width <= height and width == target) or (height <= width and height == target):
            return (height, width)

        if width < height:
            new_w = target
            new_h = int(target * height / width)
        else:
            new_h = target
            new_w = int(target * width / height)
        return (new_h, new_w)

    def __call__(self, image):
        return F.resize(image, self.get_size(image.size))
class COCODemo(object):
    """Runs a detection model from maskrcnn_benchmark on OpenCV (BGR) images
    and renders mask/keypoint overlays.

    NOTE(review): box and class-name overlays are disabled in
    `run_on_opencv_image` (the calls are commented out below).
    """

    # COCO categories for pretty print
    CATEGORIES = [
        "__background",
        "person",
        "bicycle",
        "car",
        "motorcycle",
        "airplane",
        "bus",
        "train",
        "truck",
        "boat",
        "traffic light",
        "fire hydrant",
        "stop sign",
        "parking meter",
        "bench",
        "bird",
        "cat",
        "dog",
        "horse",
        "sheep",
        "cow",
        "elephant",
        "bear",
        "zebra",
        "giraffe",
        "backpack",
        "umbrella",
        "handbag",
        "tie",
        "suitcase",
        "frisbee",
        "skis",
        "snowboard",
        "sports ball",
        "kite",
        "baseball bat",
        "baseball glove",
        "skateboard",
        "surfboard",
        "tennis racket",
        "bottle",
        "wine glass",
        "cup",
        "fork",
        "knife",
        "spoon",
        "bowl",
        "banana",
        "apple",
        "sandwich",
        "orange",
        "broccoli",
        "carrot",
        "hot dog",
        "pizza",
        "donut",
        "cake",
        "chair",
        "couch",
        "potted plant",
        "bed",
        "dining table",
        "toilet",
        "tv",
        "laptop",
        "mouse",
        "remote",
        "keyboard",
        "cell phone",
        "microwave",
        "oven",
        "toaster",
        "sink",
        "refrigerator",
        "book",
        "clock",
        "vase",
        "scissors",
        "teddy bear",
        "hair drier",
        "toothbrush",
    ]

    def __init__(
        self,
        cfg,
        confidence_threshold=0.7,
        show_mask_heatmaps=False,
        masks_per_dim=2,
        min_image_size=224,
    ):
        """Build the model from `cfg`, load its checkpoint and set up the
        preprocessing transform.

        Args:
            cfg: maskrcnn_benchmark config node; cloned so later mutations by
                the caller do not affect this instance.
            confidence_threshold: minimum detection score kept for display.
            show_mask_heatmaps: if True, render probability heatmaps instead
                of thresholded masks (the masker threshold is disabled).
            masks_per_dim: grid size used by `create_mask_montage`.
            min_image_size: stored but not used below — the transform reads
                sizes from cfg.INPUT instead.
        """
        self.cfg = cfg.clone()
        self.model = build_detection_model(cfg)
        self.model.eval()
        self.device = torch.device(cfg.MODEL.DEVICE)
        self.model.to(self.device)
        self.min_image_size = min_image_size

        save_dir = cfg.OUTPUT_DIR
        checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)

        self.transforms = self.build_transform()

        # threshold -1 keeps raw probabilities so heatmaps can be shown.
        mask_threshold = -1 if show_mask_heatmaps else 0.5
        self.masker = Masker(threshold=mask_threshold, padding=1)

        # used to make colors for each class
        self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])

        self.cpu_device = torch.device("cpu")
        self.confidence_threshold = confidence_threshold
        self.show_mask_heatmaps = show_mask_heatmaps
        self.masks_per_dim = masks_per_dim

    def build_transform(self):
        """
        Creates a basic transformation that was used to train the models
        """
        cfg = self.cfg

        # we are loading images with OpenCV, so we don't need to convert them
        # to BGR, they are already! So all we need to do is to normalize
        # by 255 if we want to convert to BGR255 format, or flip the channels
        # if we want it to be in RGB in [0-1] range.
        if cfg.INPUT.TO_BGR255:
            to_bgr_transform = T.Lambda(lambda x: x * 255)
        else:
            to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])

        normalize_transform = T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
        )
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        transform = T.Compose(
            [
                T.ToPILImage(),
                Resize(min_size, max_size),
                T.ToTensor(),
                to_bgr_transform,
                normalize_transform,
            ]
        )
        return transform

    def run_on_opencv_image(self, image):
        """
        Arguments:
            image (np.ndarray): an image as returned by OpenCV

        Returns:
            prediction (BoxList): the detected objects. Additional information
            of the detection properties can be found in the fields of
            the BoxList via `prediction.fields()`
        """
        predictions = self.compute_prediction(image)
        top_predictions = self.select_top_predictions(predictions)

        result = image.copy()
        if self.show_mask_heatmaps:
            return self.create_mask_montage(result, top_predictions)
        # Box and class-name overlays deliberately disabled in this build:
        #result = self.overlay_boxes(result, top_predictions)
        if self.cfg.MODEL.MASK_ON:
            result = self.overlay_mask(result, top_predictions)
        if self.cfg.MODEL.KEYPOINT_ON:
            result = self.overlay_keypoints(result, top_predictions)
        #result = self.overlay_class_names(result, top_predictions)

        return result

    def compute_prediction(self, original_image):
        """
        Arguments:
            original_image (np.ndarray): an image as returned by OpenCV

        Returns:
            prediction (BoxList): the detected objects. Additional information
            of the detection properties can be found in the fields of
            the BoxList via `prediction.fields()`
        """
        # apply pre-processing to image
        image = self.transforms(original_image)
        # convert to an ImageList, padded so that it is divisible by
        # cfg.DATALOADER.SIZE_DIVISIBILITY
        image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
        image_list = image_list.to(self.device)
        # compute predictions
        with torch.no_grad():
            predictions = self.model(image_list)
        predictions = [o.to(self.cpu_device) for o in predictions]

        # always single image is passed at a time
        prediction = predictions[0]

        # reshape prediction (a BoxList) into the original image size
        height, width = original_image.shape[:-1]
        prediction = prediction.resize((width, height))

        if prediction.has_field("mask"):
            # if we have masks, paste the masks in the right position
            # in the image, as defined by the bounding boxes
            masks = prediction.get_field("mask")
            # always single image is passed at a time
            masks = self.masker([masks], [prediction])[0]
            prediction.add_field("mask", masks)
        return prediction

    def select_top_predictions(self, predictions):
        """
        Select only predictions which have a `score` > self.confidence_threshold,
        and returns the predictions in descending order of score

        Arguments:
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `scores`.

        Returns:
            prediction (BoxList): the detected objects. Additional information
            of the detection properties can be found in the fields of
            the BoxList via `prediction.fields()`
        """
        scores = predictions.get_field("scores")
        keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
        predictions = predictions[keep]
        scores = predictions.get_field("scores")
        _, idx = scores.sort(0, descending=True)
        return predictions[idx]

    def compute_colors_for_labels(self, labels):
        """
        Pick one color per label.

        NOTE(review): torch.rand makes the colors random on every call, so the
        same class receives different colors across frames/invocations.
        """
        colors = torch.rand(labels.size())[:,None] * self.palette.type(torch.float32)
        colors = (colors % 255).numpy().astype("uint8")
        return colors

    def overlay_boxes(self, image, predictions):
        """
        Adds the predicted boxes on top of the image
        (currently not invoked by run_on_opencv_image — the call is
        commented out there).

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `labels`.
        """
        labels = predictions.get_field("labels")
        boxes = predictions.bbox

        colors = self.compute_colors_for_labels(labels).tolist()

        for box, color in zip(boxes, colors):
            box = box.to(torch.int64)
            top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
            image = cv2.rectangle(
                image, tuple(top_left), tuple(bottom_right), tuple(color), 1
            )

        return image

    def overlay_mask(self, image, predictions):
        """
        Adds the instances contours for each predicted object.
        Each label has a different color.

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `mask` and `labels`.
        """
        masks = predictions.get_field("mask").numpy()
        labels = predictions.get_field("labels")

        colors = self.compute_colors_for_labels(labels).tolist()
        image = image.astype(float)
        for mask, color in zip(masks, colors):
            thresh = mask[0, :, :, None]
            contours, hierarchy = cv2_util.findContours(
                thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
            )
            # White 1px contour around each instance.
            image = cv2.drawContours(image, contours, -1, [255,255,255], 1)
            # Alpha-blend the instance color over the masked pixels (60%
            # original image, 40% color). `np` is imported at module level
            # further down this file, so it is resolved by call time.
            idx = np.nonzero(thresh[:,:,0])
            for i in range(3):
                image[idx[0],idx[1],i] *= 0.6
                image[idx[0],idx[1],i] += 0.4 * color[i]
        # NOTE(review): astype(int) yields an int64 image, not uint8 —
        # confirm downstream consumers (e.g. cv2.imshow) accept this.
        image = image.astype(int)
        composite = image

        return composite

    def overlay_keypoints(self, image, predictions):
        # Draw the COCO person skeleton for every detected instance.
        keypoints = predictions.get_field("keypoints")
        kps = keypoints.keypoints
        scores = keypoints.get_field("logits")
        kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()
        for region in kps:
            image = vis_keypoints(image, region.transpose((1, 0)))
        return image

    def create_mask_montage(self, image, predictions):
        """
        Create a montage showing the probability heatmaps for each one one of the
        detected objects

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `mask`.
        """
        masks = predictions.get_field("mask")
        masks_per_dim = self.masks_per_dim
        # Downscale each mask so masks_per_dim x masks_per_dim of them tile
        # into the original image footprint.
        masks = L.interpolate(
            masks.float(), scale_factor=1 / masks_per_dim
        ).byte()
        height, width = masks.shape[-2:]
        max_masks = masks_per_dim ** 2
        masks = masks[:max_masks]
        # handle case where we have less detections than max_masks
        if len(masks) < max_masks:
            masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)
            masks_padded[: len(masks)] = masks
            masks = masks_padded
        masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)
        result = torch.zeros(
            (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8
        )
        # Paste each mask tile into its grid cell.
        for y in range(masks_per_dim):
            start_y = y * height
            end_y = (y + 1) * height
            for x in range(masks_per_dim):
                start_x = x * width
                end_x = (x + 1) * width
                result[start_y:end_y, start_x:end_x] = masks[y, x]
        return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)

    def overlay_class_names(self, image, predictions):
        """
        Adds detected class names and scores in the positions defined by the
        top-left corner of the predicted bounding box
        (currently not invoked by run_on_opencv_image — the call is
        commented out there).

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `scores` and `labels`.
        """
        scores = predictions.get_field("scores").tolist()
        labels = predictions.get_field("labels").tolist()
        labels = [self.CATEGORIES[i] for i in labels]
        boxes = predictions.bbox

        template = "{}: {:.2f}"
        for box, score, label in zip(boxes, scores, labels):
            # NOTE(review): x and y are float tensor elements here; newer
            # OpenCV releases require integer point coordinates — confirm.
            x, y = box[:2]
            s = template.format(label, score)
            cv2.putText(
                image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
            )

        return image
import numpy as np
import matplotlib.pyplot as plt
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
    """Visualizes keypoints (adapted from vis_one_image).
    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).

    Keypoints whose logit is <= kp_thresh are not drawn. The drawing is
    alpha-blended onto `img` (alpha = weight of the overlay).
    """
    dataset_keypoints = PersonKeypoints.NAMES
    kp_lines = PersonKeypoints.CONNECTIONS

    # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Perform the drawing on a copy of the image, to allow for blending.
    kp_mask = np.copy(img)

    # Draw mid shoulder / mid hip first for better visualization.
    mid_shoulder = (
        kps[:2, dataset_keypoints.index('right_shoulder')] +
        kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    # Confidence of a midpoint = the weaker of its two endpoints.
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    mid_hip = (
        kps[:2, dataset_keypoints.index('right_hip')] +
        kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
    sc_mid_hip = np.minimum(
        kps[2, dataset_keypoints.index('right_hip')],
        kps[2, dataset_keypoints.index('left_hip')])
    nose_idx = dataset_keypoints.index('nose')
    # NOTE(review): the point tuples below hold float coordinates; newer
    # OpenCV releases require integer points for cv2.line/circle — confirm
    # against the cv2 version in use.
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),
            color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
    if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(mid_hip),
            color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)

    # Draw the keypoints.
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = kps[0, i1], kps[1, i1]
        p2 = kps[0, i2], kps[1, i2]
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(
                kp_mask, p1, p2,
                color=colors[l], thickness=2, lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(
                kp_mask, p1,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(
                kp_mask, p2,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)

    # Blend the keypoints.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
6a50d2c4dd29aa9e8c40412489667a0777a00771 | 3,437 | py | Python | spider/middlewares.py | adamlabrash/Canadian-Constituencies | 5e555875bb0f436ec76c703bdcb64daa28d3d691 | [
"MIT"
] | 1 | 2020-08-18T15:52:16.000Z | 2020-08-18T15:52:16.000Z | spider/middlewares.py | adamlabrash/Canadian-Constituencies | 5e555875bb0f436ec76c703bdcb64daa28d3d691 | [
"MIT"
] | 1 | 2021-04-13T18:25:23.000Z | 2021-08-19T01:26:42.000Z | spider/middlewares.py | adamlabrash/Canadian-Constituencies | 5e555875bb0f436ec76c703bdcb64daa28d3d691 | [
"MIT"
] | 1 | 2021-04-13T17:58:08.000Z | 2021-04-13T17:58:08.000Z | from scrapy import signals
class DemocracyBotSpiderMiddleware(object):
    """Spider-middleware stub (Scrapy template).

    Every hook Scrapy recognizes is defined here with its default
    pass-through behaviour; methods not needed could simply be removed.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Instantiate the middleware and subscribe to spider_opened."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Accept every response unchanged (None = continue processing)."""
        return None

    def process_spider_output(self, response, result, spider):
        """Yield each Request/dict/Item produced by the spider, unmodified."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Fall back to Scrapy's default exception handling (None)."""
        return None

    def process_start_requests(self, start_requests, spider):
        """Yield the start requests untouched (must return requests only)."""
        yield from start_requests

    def spider_opened(self, spider):
        """Log the spider name when it starts."""
        spider.logger.info('Spider opened: %s' % spider.name)
class DemocracyBotDownloaderMiddleware(object):
    """Pass-through downloader middleware (Scrapy project template).

    Scrapy treats any hook that is not defined as "do not modify the
    passed objects"; the hooks below keep that behaviour explicit.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy instantiates the middleware through this factory; we also
        # subscribe to the spider_opened signal for logging.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None lets the request continue down the chain to the
        # downloader (other options: Response, Request, or IgnoreRequest).
        return None

    def process_response(self, request, response, spider):
        # Hand the downloader's response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery logic: continue processing this exception elsewhere.
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| 35.43299 | 78 | 0.664533 |
fa4f251b58d3c027d0a2a4532e692001b69a79eb | 168 | py | Python | molsysmt/item/freezer/molsysmt_TrajectoryDict/__init__.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/item/freezer/molsysmt_TrajectoryDict/__init__.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/item/freezer/molsysmt_TrajectoryDict/__init__.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | from .is_molsysmt_TrajectoryDict import is_molsysmt_TrajectoryDict
from .to_molsysmt_Structures import to_molsysmt_Structures
from .to_file_trjpk import to_file_trjpk
| 33.6 | 66 | 0.904762 |
e1f9b0a59ab1696ba7937c037f3d14130f441d0e | 10,547 | py | Python | desktop/libs/notebook/src/notebook/connectors/flink_sql.py | aroville/hue | 63f5f9bcd18f9e76be1983a56137a30cbd96e49d | [
"Apache-2.0"
] | 1 | 2021-04-16T19:53:43.000Z | 2021-04-16T19:53:43.000Z | desktop/libs/notebook/src/notebook/connectors/flink_sql.py | aroville/hue | 63f5f9bcd18f9e76be1983a56137a30cbd96e49d | [
"Apache-2.0"
] | null | null | null | desktop/libs/notebook/src/notebook/connectors/flink_sql.py | aroville/hue | 63f5f9bcd18f9e76be1983a56137a30cbd96e49d | [
"Apache-2.0"
] | 4 | 2020-06-01T06:00:49.000Z | 2021-01-13T18:16:34.000Z | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import json
import posixpath
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from desktop.lib.i18n import force_unicode
from desktop.lib.rest.http_client import HttpClient, RestException
from desktop.lib.rest.resource import Resource
from notebook.connectors.base import Api, QueryError
LOG = logging.getLogger(__name__)

_JSON_CONTENT_TYPE = 'application/json'
# REST API version prefix of the flink-sql-gateway endpoints.
_API_VERSION = 'v1'

# Process-wide cache of gateway sessions, keyed by SESSION_KEY below
# (one session per user + connector pair).
SESSIONS = {}
SESSION_KEY = '%(username)s-%(connector_name)s'

# Module-global result-pagination token shared by FlinkSqlApi.execute,
# check_status and fetch_result.
# NOTE(review): global mutable state — appears unsafe if two queries run
# concurrently in one process; confirm the single-active-query assumption.
n = 0
def query_error_handler(func):
  """Decorator translating low-level connector errors into QueryError.

  RestException payloads are expected to carry a JSON body with an
  'errors' field; if that body cannot be parsed, the raw message is used.
  Any other exception is stringified and re-raised as QueryError.
  """
  import functools

  @functools.wraps(func)  # preserve the wrapped function's metadata
  def decorator(*args, **kwargs):
    try:
      return func(*args, **kwargs)
    except RestException as e:
      try:
        message = force_unicode(json.loads(e.message)['errors'])
      except Exception:
        # Non-JSON or malformed payload: fall back to the raw message.
        # (Previously a bare `except:` which also swallowed SystemExit
        # and KeyboardInterrupt.)
        message = e.message
      message = force_unicode(message)
      raise QueryError(message)
    except Exception as e:
      message = force_unicode(str(e))
      raise QueryError(message)
  return decorator
class FlinkSqlApi(Api):
  """Hue notebook connector API for the Ververica flink-sql-gateway.

  One gateway session per (user, connector) pair is cached in the module
  level SESSIONS dict. Result pagination relies on the module global ``n``
  (the gateway fetch token): ``execute`` resets it to 0 and ``fetch_result``
  advances it from the ``next_result_uri`` returned by the gateway.
  NOTE(review): that shared global looks unsafe for concurrent queries —
  confirm the single-active-query assumption before reuse.
  """

  def __init__(self, user, interpreter=None):
    Api.__init__(self, user, interpreter=interpreter)
    self.options = interpreter['options']
    # Low-level REST client bound to the configured gateway URL.
    self.db = FlinkSqlClient(user=user, api_url=self.options['api_url'])

  @query_error_handler
  def create_session(self, lang=None, properties=None):
    """Open a new gateway session.

    NOTE: `properties` is currently ignored; the client supplies defaults.
    """
    session = self.db.create_session()
    response = {
      'type': lang,
      'id': session['session_id']
    }
    return response

  def _get_session(self):
    """Return the cached session for this user/connector, creating or
    re-creating it when missing or expired on the gateway side."""
    session_key = SESSION_KEY % {
      'username': self.user.username,
      'connector_name': self.interpreter['name']
    }

    if session_key not in SESSIONS:
      SESSIONS[session_key] = self.create_session()

    try:
      # Heartbeat keeps the session alive and doubles as a staleness probe.
      self.db.session_heartbeat(session_id=SESSIONS[session_key]['id'])
    except Exception as e:
      # The gateway reports unknown sessions via this message fragment.
      if 'Session: %(id)s does not exist' % SESSIONS[session_key] in str(e):
        LOG.warn('Session: %(id)s does not exist, opening a new one' % SESSIONS[session_key])
        SESSIONS[session_key] = self.create_session()
      else:
        raise e

    return SESSIONS[session_key]

  @query_error_handler
  def execute(self, notebook, snippet):
    """Submit ``snippet['statement']`` to the gateway.

    SELECT statements run asynchronously: the first cell of the response
    payload is the gateway job id, later polled via check_status /
    fetch_result. All other statement types return their result inline.
    """
    global n
    # Reset the fetch token for the new query.
    n = 0

    session = self._get_session()
    session_id = session['id']

    job_id = None
    resp = self.db.execute_statement(session_id=session_id, statement=snippet['statement'])

    if resp['statement_types'][0] == 'SELECT':
      # Async path: extract the job id, results come later via fetch_result.
      job_id = resp['results'][0]['data'][0][0]
      data, description = [], []
      # TODO: change_flags
    else:
      # Sync path: result rows and column metadata are returned inline.
      data, description = resp['results'][0]['data'], resp['results'][0]['columns']

    has_result_set = data is not None

    return {
      'sync': job_id is None,
      'has_result_set': has_result_set,
      'guid': job_id,
      'result': {
        'has_more': job_id is not None,
        'data': data if job_id is None else [],
        'meta': [{
            'name': col['name'],
            'type': col['type'],
            'comment': ''
          }
          for col in description
        ] if has_result_set else [],
        'type': 'table'
      }
    }

  @query_error_handler
  def check_status(self, notebook, snippet):
    """Poll the gateway job status and map it to notebook statuses:
    RUNNING -> 'streaming' (partial results attached), FINISHED ->
    'available', FAILED -> 'failed', CANCELED -> 'expired'."""
    global n
    response = {}

    session = self._get_session()
    statement_id = snippet['result']['handle']['guid']

    status = 'expired'

    if session:
      if not statement_id:  # Sync result: nothing to poll, data already here.
        status = 'available'
      else:
        try:
          resp = self.db.fetch_status(session['id'], statement_id)
          if resp.get('status') == 'RUNNING':
            status = 'streaming'
            # Attach the currently available partial result page.
            response['result'] = self.fetch_result(notebook, snippet, n, False)
          elif resp.get('status') == 'FINISHED':
            status = 'available'
          elif resp.get('status') == 'FAILED':
            status = 'failed'
          elif resp.get('status') == 'CANCELED':
            status = 'expired'
        except Exception as e:
          # A vanished job is treated as expired rather than an error.
          if '%s does not exist in current session' % statement_id in str(e):
            LOG.warn('Job: %s does not exist' % statement_id)
          else:
            raise e

    response['status'] = status

    return response

  @query_error_handler
  def fetch_result(self, notebook, snippet, rows, start_over):
    """Fetch one page of results for an async job.

    NOTE(review): the `rows` and `start_over` arguments are ignored —
    pagination is driven entirely by the module global token ``n``.
    """
    global n
    session = self._get_session()
    statement_id = snippet['result']['handle']['guid']
    token = n  # the `rows` parameter is not used; see note above

    resp = self.db.fetch_results(session['id'], job_id=statement_id, token=token)

    next_result = resp.get('next_result_uri')
    if next_result:
      # The next token is the last path segment of next_result_uri.
      n = int(next_result.rsplit('/', 1)[-1])

    return {
      'has_more': bool(next_result),
      'data': resp['results'][0]['data'],  # No escaping of cell values is applied
      'meta': [{
          'name': column['name'],
          'type': column['type'],
          'comment': ''
        }
        for column in resp['results'][0]['columns']
      ],
      'type': 'table'
    }

  @query_error_handler
  def autocomplete(self, snippet, database=None, table=None, column=None, nested=None):
    """Return databases, tables or columns depending on which arguments
    are provided; errors are reported in-band as {'code': 500, ...}."""
    response = {}

    try:
      if database is None:
        response['databases'] = self.show_databases()
      elif table is None:
        response['tables_meta'] = self.show_tables(database)
      elif column is None:
        columns = self.get_columns(database, table)
        response['columns'] = [col['name'] for col in columns]
        response['extended_columns'] = [{
            'comment': col.get('comment'),
            'name': col.get('name'),
            'type': col['type']
          }
          for col in columns
        ]
      else:
        # Nested/column-level completion is not implemented.
        response = {}
    except Exception as e:
      LOG.warn('Autocomplete data fetching error: %s' % e)
      response['code'] = 500
      response['error'] = str(e)

    return response

  def show_databases(self):
    """List database names via `SHOW DATABASES` (first column of each row)."""
    session = self._get_session()
    session_id = session['id']
    resp = self.db.execute_statement(session_id=session_id, statement='SHOW DATABASES')
    return [db[0] for db in resp['results'][0]['data']]

  def show_tables(self, database):
    """List table names of `database` via `USE ...` + `SHOW TABLES`."""
    session = self._get_session()
    session_id = session['id']
    resp = self.db.execute_statement(session_id=session_id, statement='USE %(database)s' % {'database': database})
    resp = self.db.execute_statement(session_id=session_id, statement='DESCRIBE %(table)s' % {'table': table}) if False else self.db.execute_statement(session_id=session_id, statement='SHOW TABLES')
    return [table[0] for table in resp['results'][0]['data']]

  def get_columns(self, database, table):
    """Describe `database`.`table`; the gateway returns the schema as a
    JSON string in the first cell of the DESCRIBE result."""
    session = self._get_session()
    session_id = session['id']
    resp = self.db.execute_statement(session_id=session_id, statement='USE %(database)s' % {'database': database})
    resp = self.db.execute_statement(session_id=session_id, statement='DESCRIBE %(table)s' % {'table': table})
    columns = json.loads(resp['results'][0]['data'][0][0])['columns']
    return [{
        'name': col['field_name'],
        'type': col['field_type'],  # Types to unify
        'comment': '',
      }
      for col in columns
    ]

  def cancel(self, notebook, snippet):
    """Cancel the running job; an already-gone job is reported as skipped."""
    session = self._get_session()
    statement_id = snippet['result']['handle']['guid']
    try:
      self.db.close_statement(session_id=session['id'], job_id=statement_id)
    except Exception as e:
      if 'does not exist in current session:' in str(e):
        return {'status': -1}  # skipped
      else:
        raise e

    return {'status': 0}

  def close_session(self, session):
    # NOTE(review): the `session` argument is shadowed — the cached session
    # for the current user/connector is closed instead. Verify callers.
    session = self._get_session()
    self.db.close_session(session['id'])
class FlinkSqlClient():
  '''
  Thin REST wrapper for https://github.com/ververica/flink-sql-gateway

  Could be a pip module or sqlalchemy dialect in the future.
  '''

  def __init__(self, user, api_url):
    self.user = user
    # Base URL always ends with the API version and a trailing slash.
    base_url = api_url + '/' + _API_VERSION + '/'
    self._url = posixpath.join(base_url)
    self._client = HttpClient(self._url, logger=LOG)
    self._root = Resource(self._client)

  def __str__(self):
    return "FlinkClient at %s" % (self._url,)

  def info(self):
    # Gateway version / feature probe.
    return self._root.get('info')

  def create_session(self, **properties):
    # Defaults below can be overridden via keyword arguments.
    payload = {
      "session_name": "test",  # optional
      "planner": "blink",  # required, "old"/"blink"
      "execution_type": "streaming",  # required, "batch"/"streaming"
      "properties": {  # optional
        "key": "value"
      }
    }
    payload.update(properties)
    return self._root.post('sessions', data=json.dumps(payload), contenttype=_JSON_CONTENT_TYPE)

  def session_heartbeat(self, session_id):
    path = 'sessions/%(session_id)s/heartbeat' % {'session_id': session_id}
    return self._root.post(path)

  def execute_statement(self, session_id, statement):
    payload = {
      "statement": statement,  # required
      "execution_timeout": ""  # execution time limit in milliseconds, optional, but required for stream SELECT ?
    }
    path = 'sessions/%(session_id)s/statements' % {'session_id': session_id}
    return self._root.post(path, data=json.dumps(payload), contenttype=_JSON_CONTENT_TYPE)

  def fetch_status(self, session_id, job_id):
    path = 'sessions/%(session_id)s/jobs/%(job_id)s/status' % {
      'session_id': session_id,
      'job_id': job_id
    }
    return self._root.get(path)

  def fetch_results(self, session_id, job_id, token=0):
    path = 'sessions/%(session_id)s/jobs/%(job_id)s/result/%(token)s' % {
      'session_id': session_id,
      'job_id': job_id,
      'token': token
    }
    return self._root.get(path)

  def close_statement(self, session_id, job_id):
    path = 'sessions/%(session_id)s/jobs/%(job_id)s' % {
      'session_id': session_id,
      'job_id': job_id,
    }
    return self._root.delete(path)

  def close_session(self, session_id):
    path = 'sessions/%(session_id)s' % {
      'session_id': session_id,
    }
    return self._root.delete(path)
| 28.73842 | 115 | 0.629089 |
0cb51fee428e3d6127409e278e5d20ad8bda6420 | 7,833 | py | Python | dl/face-parse/converter.py | showkeyjar/beauty | 7c944cf896c899d9e23b2e50e293103bb03fe6cd | [
"MulanPSL-1.0"
] | 1 | 2022-01-29T12:32:38.000Z | 2022-01-29T12:32:38.000Z | dl/face-parse/converter.py | showkeyjar/beauty | 7c944cf896c899d9e23b2e50e293103bb03fe6cd | [
"MulanPSL-1.0"
] | null | null | null | dl/face-parse/converter.py | showkeyjar/beauty | 7c944cf896c899d9e23b2e50e293103bb03fe6cd | [
"MulanPSL-1.0"
] | null | null | null | from typing import Optional
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
import shutil
import logging
import cv2
import numpy as np
import onnx
from onnx_tf.backend import prepare
import torch
import tensorflow as tf
from model import BiSeNet
class Torch2TFLiteConverter:
    """Convert a PyTorch BiSeNet checkpoint to a float16-quantized TFLite model.

    Pipeline: PyTorch -> ONNX -> TensorFlow SavedModel -> TFLite, with a
    final numeric comparison (MSE/MAE) between the PyTorch and TFLite
    outputs on one sample input.
    """

    def __init__(
            self,
            torch_model_path: str,
            tflite_model_save_path: str,
            sample_file_path: Optional[str] = None,
            target_shape: tuple = (224, 224, 3),
            seed: int = 10,
            normalize: bool = True
    ):
        # target_shape is HWC; when sample_file_path is None, random data of
        # this shape is generated with the given seed instead.
        self.torch_model_path = torch_model_path
        self.tflite_model_path = tflite_model_save_path
        self.sample_file_path = sample_file_path
        self.target_shape = target_shape
        self.seed = seed
        self.normalize = normalize
        # Scratch space for the intermediate ONNX / SavedModel artifacts;
        # wiped on every run by __check_tmpdir.
        self.tmpdir = '/tmp/torch2tflite/'
        self.__check_tmpdir()
        self.onnx_model_path = os.path.join(self.tmpdir, 'model.onnx')
        self.tf_model_path = os.path.join(self.tmpdir, 'tf_model')
        self.torch_model = self.load_torch_model()
        self.sample_data = self.load_sample_input(sample_file_path, target_shape, seed, normalize)

    def convert(self):
        """Run the full conversion pipeline and report the numeric error."""
        self.torch2onnx()
        self.onnx2tf()
        self.tf2tflite()
        torch_output = self.inference_torch()
        tflite_output = self.inference_tflite(self.load_tflite())
        self.calc_error(torch_output, tflite_output)

    def __check_tmpdir(self):
        # Recreate the scratch directory from scratch; exit on failure since
        # the rest of the pipeline cannot proceed without it.
        try:
            if os.path.exists(self.tmpdir) and os.path.isdir(self.tmpdir):
                shutil.rmtree(self.tmpdir)
                logging.info(f'Old temp directory removed')
            os.makedirs(self.tmpdir, exist_ok=True)
            logging.info(f'Temp directory created at {self.tmpdir}')
        except Exception:
            logging.error('Can not create temporary directory, exiting!')
            sys.exit(-1)

    def load_torch_model(self) -> torch.nn.Module:
        """Load the BiSeNet state dict from a .pth/.pt file, in eval mode."""
        try:
            if self.torch_model_path.endswith('.pth') or self.torch_model_path.endswith('.pt'):
                # Adapted for this specific model (a state dict is loaded
                # into a freshly built BiSeNet) instead of the generic:
                # model = torch.load(self.torch_model_path, map_location='cpu')
                # model = model.eval()
                n_classes = 19
                model = BiSeNet(n_classes=n_classes)
                # Do not move the model to GPU here, otherwise the
                # conversion fails.
                # model.cuda()
                model.load_state_dict(torch.load(self.torch_model_path, map_location='cpu'))
                model = model.eval()
                logging.info('PyTorch model successfully loaded')
                return model
            else:
                logging.error('Specified file path not compatible with torch2tflite, exiting!')
                sys.exit(-1)
        except Exception as e:
            logging.warning(e)
            logging.error('Can not load PyTorch model. Please make sure'
                          'that model saved like `torch.save(model, PATH)`')
            sys.exit(-1)

    def load_tflite(self):
        """Create a TFLite interpreter for the converted model."""
        interpret = tf.lite.Interpreter(self.tflite_model_path)
        interpret.allocate_tensors()
        logging.info(f'TFLite interpreter successfully loaded from, {self.tflite_model_path}')
        return interpret

    @staticmethod
    def load_sample_input(
            file_path: Optional[str] = None,
            target_shape: tuple = (224, 224, 3),
            seed: int = 10,
            normalize: bool = True
    ):
        """Build the NCHW float32 sample used for export and error checking.

        Returns a dict with both a numpy array ('sample_data_np') and the
        matching torch tensor ('sample_data_torch').
        """
        if file_path is not None:
            print("input shape: ", str(target_shape))
            target_shape = tuple([int(ix) for ix in target_shape])
            # Pick the imread mode from the requested channel count.
            if (len(target_shape) == 3 and target_shape[-1] == 1) or len(target_shape) == 2:
                imread_flags = cv2.IMREAD_GRAYSCALE
            elif len(target_shape) == 3 and target_shape[-1] == 3:
                imread_flags = cv2.IMREAD_COLOR
            else:
                imread_flags = cv2.IMREAD_ANYCOLOR + cv2.IMREAD_ANYDEPTH
            try:
                img = cv2.resize(
                    src=cv2.imread(file_path, imread_flags),
                    dsize=target_shape[:2],
                    interpolation=cv2.INTER_LINEAR
                )
                if len(img.shape) == 3:
                    # OpenCV loads BGR; the model expects RGB.
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                if normalize:
                    img = img * 1. / 255
                img = img.astype(np.float32)
                # HWC -> NCHW with a leading batch dimension.
                sample_data_np = np.transpose(img, (2, 0, 1))[np.newaxis, :, :, :]
                sample_data_torch = torch.from_numpy(sample_data_np)
                logging.info(f'Sample input successfully loaded from, {file_path}')
            except Exception as e:
                logging.exception(e)
                logging.error(f'Can not load sample input from, {file_path}')
                sys.exit(-1)
        else:
            logging.info(f'Sample input file path not specified, random data will be generated')
            np.random.seed(seed)
            data = np.random.random(target_shape).astype(np.float32)
            sample_data_np = np.transpose(data, (2, 0, 1))[np.newaxis, :, :, :]
            sample_data_torch = torch.from_numpy(sample_data_np)
            logging.info(f'Sample input randomly generated')
        return {'sample_data_np': sample_data_np, 'sample_data_torch': sample_data_torch}

    def torch2onnx(self) -> None:
        # Export with fixed input/output names so downstream tools can find
        # the tensors; opset 11 is required by the ops BiSeNet uses.
        torch.onnx.export(
            model=self.torch_model,
            args=self.sample_data['sample_data_torch'],
            f=self.onnx_model_path,
            verbose=False,
            export_params=True,
            do_constant_folding=False,
            input_names=['input'],
            opset_version=11,
            output_names=['output'])

    def onnx2tf(self) -> None:
        # Validate the ONNX graph, then export a TensorFlow SavedModel.
        onnx_model = onnx.load(self.onnx_model_path)
        onnx.checker.check_model(onnx_model)
        tf_rep = prepare(onnx_model)
        tf_rep.export_graph(self.tf_model_path)

    def tf2tflite(self) -> None:
        converter = tf.lite.TFLiteConverter.from_saved_model(self.tf_model_path)
        # Apply float16 quantization during the TFLite conversion.
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.float16]
        tflite_model = converter.convert()
        with open(self.tflite_model_path, 'wb') as f:
            f.write(tflite_model)

    def inference_torch(self) -> np.ndarray:
        # BiSeNet returns a tuple of outputs; only the first head is compared.
        y_pred = self.torch_model(self.sample_data['sample_data_torch'])
        return y_pred[0].detach().cpu().numpy()

    def inference_tflite(self, tflite_model) -> np.ndarray:
        """Run the sample through the TFLite interpreter and return output."""
        input_details = tflite_model.get_input_details()
        output_details = tflite_model.get_output_details()
        tflite_model.set_tensor(input_details[0]['index'], self.sample_data['sample_data_np'])
        tflite_model.invoke()
        y_pred = tflite_model.get_tensor(output_details[0]['index'])
        return y_pred

    @staticmethod
    def calc_error(result_torch, result_tflite):
        # Log mean-squared and mean-absolute error between the two backends.
        mse = ((result_torch - result_tflite) ** 2).mean(axis=None)
        mae = np.abs(result_torch - result_tflite).mean(axis=None)
        logging.info(f'MSE (Mean-Square-Error): {mse}\tMAE (Mean-Absolute-Error): {mae}')
if __name__ == '__main__':
    import argparse

    # CLI entry point: parse the conversion parameters and run the pipeline.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--torch-path', type=str, required=True)
    arg_parser.add_argument('--tflite-path', type=str, required=True)
    arg_parser.add_argument('--target-shape', type=int, nargs=3, default=(224, 224, 3))
    arg_parser.add_argument('--sample-file', type=str)
    arg_parser.add_argument('--seed', type=int, default=10)
    cli_args = arg_parser.parse_args()

    converter = Torch2TFLiteConverter(
        cli_args.torch_path,
        cli_args.tflite_path,
        cli_args.sample_file,
        tuple(cli_args.target_shape),
        cli_args.seed
    )
    converter.convert()
    sys.exit(0)
a1196160903cd2719f3510b947885ecc231f4037 | 52,683 | py | Python | tests/python/unittest/test_numpy_ndarray.py | ChaokunChang/incubator-mxnet | 3a5c78aa145411f01f9ce636b6a0f798b4730433 | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_numpy_ndarray.py | ChaokunChang/incubator-mxnet | 3a5c78aa145411f01f9ce636b6a0f798b4730433 | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_numpy_ndarray.py | ChaokunChang/incubator-mxnet | 3a5c78aa145411f01f9ce636b6a0f798b4730433 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import absolute_import
from __future__ import division
import itertools
import os
import unittest
import numpy as _np
import mxnet as mx
from mxnet import np, npx, autograd
from mxnet.gluon import HybridBlock
from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, retry, use_np
from common import with_seed, TemporaryDirectory
from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, assert_exception, is_op_runnable, collapse_sum_like
from mxnet.ndarray.ndarray import py_slice
from mxnet.base import integer_types
@with_seed()
@use_np
def test_np_empty():
    """np.empty: dtype mapping, shape normalization, ctx defaulting, and
    rejection of non-C memory orders."""
    # (input dtype, expected output dtype); None falls back to float32.
    dtype_pairs = [
        (np.int8, np.int8),
        (np.int32, np.int32),
        (np.float16, np.float16),
        (np.float32, np.float32),
        (np.float64, np.float64),
        (np.bool_, np.bool_),
        (np.bool, np.bool_),
        ('int8', np.int8),
        ('int32', np.int32),
        ('float16', np.float16),
        ('float32', np.float32),
        ('float64', np.float64),
        ('bool', np.bool_),
        (None, np.float32),
    ]
    orders = ['C', 'F', 'A']
    shapes = [
        (),
        0,
        (0,),
        (0, 0),
        2,
        (2,),
        (3, 0),
        (4, 5),
        (1, 1, 1, 1),
    ]
    ctxes = [npx.current_context(), None]
    for dtype, expected_dtype in dtype_pairs:
        for shape in shapes:
            for order in orders:
                for ctx in ctxes:
                    if order == 'C':
                        ret = np.empty(shape, dtype, order, ctx)
                        assert ret.dtype == expected_dtype
                        # Parenthesized so the conditional picks the expected
                        # shape, not the whole comparison. The original
                        # `assert a == b if cond else (shape,)` asserted a
                        # truthy tuple (i.e. nothing) for int shapes.
                        assert ret.shape == (shape if isinstance(shape, tuple) else (shape,))
                        assert ret.ctx == npx.current_context()
                    else:
                        # Only C order is implemented.
                        assert_exception(np.empty, NotImplementedError, shape, dtype, order, ctx)
@with_seed()
@use_np
def test_np_array_creation():
    """np.array: values match the NumPy equivalent and the dtype defaults
    follow MXNet's rules (source dtype for ndarrays, float32 otherwise)."""
    dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, _np.bool, _np.bool_,
              'int8', 'int32', 'float16', 'float32', 'float64', 'bool', None]
    objects = [
        [],
        (),
        [[1, 2], [3, 4]],
        _np.random.randint(-10, 10, size=rand_shape_nd(3)),
        _np.random.uniform(size=rand_shape_nd(3)),
        _np.random.uniform(size=(3, 0, 4))
    ]
    for dtype in dtypes:
        for src in objects:
            mx_arr = np.array(src, dtype=dtype)
            assert mx_arr.ctx == mx.current_context()
            # Resolve the expected dtype in a separate local: the original
            # reassigned the `dtype` loop variable here, so every inner
            # iteration after the first in the dtype=None pass silently
            # tested a stale dtype instead of the default behavior.
            expected_dtype = dtype
            if expected_dtype is None:
                expected_dtype = src.dtype if isinstance(src, _np.ndarray) else _np.float32
            if isinstance(src, mx.nd.NDArray):
                np_arr = _np.array(src.asnumpy(), dtype=expected_dtype)
            else:
                np_arr = _np.array(src, dtype=expected_dtype)
            assert mx_arr.dtype == np_arr.dtype
            assert same(mx_arr.asnumpy(), np_arr)
@with_seed()
@use_np
def test_np_zeros():
    """np.zeros: dtype defaults and value correctness, both imperative and
    through Gluon (hybridized and eager), including zero-size shapes."""
    # test np.zeros in Gluon
    class TestZeros(HybridBlock):
        def __init__(self, shape, dtype=None):
            super(TestZeros, self).__init__()
            self._shape = shape
            self._dtype = dtype

        def hybrid_forward(self, F, x, *args, **kwargs):
            # Use the stored constructor arguments; the original read the
            # enclosing test's loop variables `shape`/`dtype`, which only
            # worked by accident.
            return x + F.np.zeros(self._shape, self._dtype)

    class TestZerosOutputType(HybridBlock):
        def hybrid_forward(self, F, x, *args, **kwargs):
            return x, F.np.zeros(shape=())

    # test np.zeros in imperative
    def check_zero_array_creation(shape, dtype):
        np_out = _np.zeros(shape=shape, dtype=dtype)
        mx_out = np.zeros(shape=shape, dtype=dtype)
        assert same(mx_out.asnumpy(), np_out)
        if dtype is None:
            # Default dtypes differ: MXNet float32 vs NumPy float64.
            assert mx_out.dtype == _np.float32
            assert np_out.dtype == _np.float64
        else:
            assert mx_out.dtype == np_out.dtype

    shapes = [(0,), (2, 0, 2), (0, 0, 0, 0), ()]
    shapes += [rand_shape_nd(ndim, allow_zero_size=True) for ndim in range(5)]
    dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, None]
    for shape in shapes:
        for dtype in dtypes:
            check_zero_array_creation(shape, dtype)
            x = np.array(_np.random.uniform(size=shape), dtype=dtype)
            if dtype is None:
                x = x.astype('float32')
            for hybridize in [True, False]:
                test_zeros = TestZeros(shape, dtype)
                test_zeros_output_type = TestZerosOutputType()
                if hybridize:
                    test_zeros.hybridize()
                    test_zeros_output_type.hybridize()
                y = test_zeros(x)
                assert type(y) == np.ndarray
                assert same(x.asnumpy(), y.asnumpy())
                y = test_zeros_output_type(x)
                assert type(y[1]) == np.ndarray

    for shape in shapes:
        # The original listed `_np.bool` twice; `_np.bool_` was intended.
        for dtype in [_np.bool, bool, _np.bool_, 'bool']:
            check_zero_array_creation(shape, dtype)
@with_seed()
@use_np
def test_np_ones():
    """np.ones: dtype defaults and value correctness, both imperative and
    through Gluon (hybridized and eager), including zero-size shapes."""
    # test np.ones in Gluon
    class TestOnes(HybridBlock):
        def __init__(self, shape, dtype=None):
            super(TestOnes, self).__init__()
            self._shape = shape
            self._dtype = dtype

        def hybrid_forward(self, F, x, *args, **kwargs):
            # Use the stored constructor arguments; the original read the
            # enclosing test's loop variables `shape`/`dtype`, which only
            # worked by accident.
            return x * F.np.ones(self._shape, self._dtype)

    class TestOnesOutputType(HybridBlock):
        def hybrid_forward(self, F, x, *args, **kwargs):
            return x, F.np.ones(shape=())

    # test np.ones in imperative
    def check_ones_array_creation(shape, dtype):
        np_out = _np.ones(shape=shape, dtype=dtype)
        mx_out = np.ones(shape=shape, dtype=dtype)
        assert same(mx_out.asnumpy(), np_out)
        if dtype is None:
            # Default dtypes differ: MXNet float32 vs NumPy float64.
            assert mx_out.dtype == _np.float32
            assert np_out.dtype == _np.float64
        else:
            assert mx_out.dtype == np_out.dtype

    shapes = [(0,), (2, 0, 2), (0, 0, 0, 0), ()]
    shapes += [rand_shape_nd(ndim, allow_zero_size=True) for ndim in range(5)]
    dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, None]
    for shape in shapes:
        for dtype in dtypes:
            check_ones_array_creation(shape, dtype)
            x = mx.nd.array(_np.random.uniform(size=shape), dtype=dtype).as_np_ndarray()
            if dtype is None:
                x = x.astype('float32')
            for hybridize in [True, False]:
                test_ones = TestOnes(shape, dtype)
                test_ones_output_type = TestOnesOutputType()
                if hybridize:
                    test_ones.hybridize()
                    test_ones_output_type.hybridize()
                y = test_ones(x)
                assert type(y) == np.ndarray
                assert same(x.asnumpy(), y.asnumpy())
                y = test_ones_output_type(x)
                assert type(y[1]) == np.ndarray

    for shape in shapes:
        # The original listed `_np.bool` twice; `_np.bool_` was intended.
        for dtype in [_np.bool, bool, _np.bool_, 'bool']:
            check_ones_array_creation(shape, dtype)
@with_seed()
@use_np
def test_identity():
    """np.identity: values/dtype vs NumPy, imperative and through Gluon."""
    class TestIdentity(HybridBlock):
        def __init__(self, n, dtype=None):
            # The parameter was previously named `shape` while the body read
            # the enclosing test's loop variable `n`; take `n` explicitly.
            super(TestIdentity, self).__init__()
            self._n = n
            self._dtype = dtype

        def hybrid_forward(self, F, x):
            return x * F.np.identity(self._n, self._dtype)

    class TestIdentityOutputType(HybridBlock):
        def hybrid_forward(self, F, x):
            return x, F.np.identity(0)

    def check_identity_array_creation(n, dtype):
        # Previously declared as (shape, dtype) but reading the outer `n`;
        # the parameter is now used directly.
        np_out = _np.identity(n=n, dtype=dtype)
        mx_out = np.identity(n=n, dtype=dtype)
        assert same(mx_out.asnumpy(), np_out)
        if dtype is None:
            # Default dtypes differ: MXNet float32 vs NumPy float64.
            assert mx_out.dtype == _np.float32
            assert np_out.dtype == _np.float64

    ns = [0, 1, 2, 3, 5, 15, 30, 200]
    dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, None]
    for n in ns:
        for dtype in dtypes:
            check_identity_array_creation(n, dtype)
            x = mx.nd.array(_np.random.uniform(size=(n, n)), dtype=dtype).as_np_ndarray()
            if dtype is None:
                x = x.astype('float32')
            for hybridize in [True, False]:
                test_identity = TestIdentity(n, dtype)
                test_identity_output_type = TestIdentityOutputType()
                if hybridize:
                    test_identity.hybridize()
                    test_identity_output_type.hybridize()
                y = test_identity(x)
                assert type(y) == np.ndarray
                assert same(x.asnumpy() * _np.identity(n, dtype), y.asnumpy())
                y = test_identity_output_type(x)
                assert type(y[1]) == np.ndarray
@with_seed()
def test_np_ndarray_binary_element_wise_ops():
np_op_map = {
'+': _np.add,
'*': _np.multiply,
'-': _np.subtract,
'/': _np.divide,
'mod': _np.mod,
'pow': _np.power,
}
if is_op_runnable():
np_op_map.update({
'==': _np.equal,
'!=': _np.not_equal,
'>': _np.greater,
'>=': _np.greater_equal,
'<': _np.less,
'<=': _np.less_equal
})
def _get_grad_func(op, scalar=None, reverse=False):
if op == '+':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd, x1.shape),
collapse_sum_like(ograd, x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd
else:
return lambda ograd, x1, x2, out: ograd
elif op == '-':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd, x1.shape),
-collapse_sum_like(ograd, x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd
else:
return lambda ograd, x1, x2, out: -ograd
elif op == '*':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd * x2, x1.shape),
collapse_sum_like(ograd * x1, x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd * x2
else:
return lambda ograd, x1, x2, out: ograd * x1
elif op == '/':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd / x2, x1.shape),
collapse_sum_like(-x1 * ograd / (x2 * x2), x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd / x2
else:
return lambda ograd, x1, x2, out: -x1 * ograd / (x2 * x2)
elif op == 'mod':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd, x1.shape),
collapse_sum_like(-ograd * _np.floor(x1 / x2), x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd
else:
return lambda ograd, x1, x2, out: -ograd * _np.floor(x1 / x2)
elif op == 'pow':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd * x2 * _np.power(x1, x2 - 1), x1.shape),
collapse_sum_like(ograd * out * _np.log(x1), x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd * x2 * _np.power(x1, x2 - 1)
else:
return lambda ograd, x1, x2, out: ograd * out * _np.log(x1)
elif op in ('==', '!=', '<', '<=', '>', '>='):
if scalar is None:
return lambda ograd, x1, x2, out: (_np.zeros_like(x1), _np.zeros_like(x2))
else:
return lambda ograd, x1, x2, out: _np.zeros_like(ograd)
return None
def get_np_ret(x1, x2, op):
return np_op_map[op](x1, x2)
@use_np
class TestBinaryElementWiseOp(HybridBlock):
def __init__(self, op, scalar=None, reverse=False):
super(TestBinaryElementWiseOp, self).__init__()
self._op = op
self._scalar = scalar
self._reverse = reverse # if false, scalar is the right operand.
def hybrid_forward(self, F, x, *args):
if self._op == '+':
if self._scalar is not None:
return x + self._scalar if not self._reverse else self._scalar + x
else:
return x + args[0] if not self._reverse else args[0] + x
elif self._op == '*':
if self._scalar is not None:
return x * self._scalar if not self._reverse else self._scalar * x
else:
return x * args[0] if not self._reverse else args[0] * x
elif self._op == '-':
if self._scalar is not None:
return x - self._scalar if not self._reverse else self._scalar - x
else:
return x - args[0] if not self._reverse else args[0] - x
elif self._op == '/':
if self._scalar is not None:
return x / self._scalar if not self._reverse else self._scalar / x
else:
return x / args[0] if not self._reverse else args[0] / x
elif self._op == 'mod':
if self._scalar is not None:
return x % self._scalar if not self._reverse else self._scalar % x
else:
return x % args[0] if not self._reverse else args[0] % x
elif self._op == 'pow':
if self._scalar is not None:
return x ** self._scalar if not self._reverse else self._scalar ** x
else:
return x ** args[0] if not self._reverse else args[0] ** x
elif self._op == '>':
if self._scalar is not None:
return x > self._scalar if not self._reverse else self._scalar > x
else:
return x > args[0]
elif self._op == '>=':
if self._scalar is not None:
return x >= self._scalar if not self._reverse else self._scalar >= x
else:
return x >= args[0]
elif self._op == '<':
if self._scalar is not None:
return x < self._scalar if not self._reverse else self._scalar < x
else:
return x < args[0]
elif self._op == '<=':
if self._scalar is not None:
return x <= self._scalar if not self._reverse else self._scalar <= x
else:
return x <= args[0]
elif self._op == '==':
if self._scalar is not None:
return x == self._scalar if not self._reverse else self._scalar == x
else:
return x == args[0]
elif self._op == '!=':
if self._scalar is not None:
return x != self._scalar if not self._reverse else self._scalar != x
else:
return x != args[0]
else:
print(self._op)
assert False
logic_ops = ['==', '!=', '>', '<', '>=', '<=']
    @use_np
    def check_binary_op_result(shape1, shape2, op, dtype=None):
        """Check forward/backward of one binary elementwise op against numpy.

        shape1/shape2: input shapes; None means the operand is a Python scalar,
        which routes the test through the scalar (lhs/rhs) code path.
        op: operator token understood by TestBinaryElementWiseOp / get_np_ret.
        """
        # Operand 1: scalar (shifted away from 0 to keep div/mod/pow safe)
        # or a random positive ndarray with gradient attached.
        if shape1 is None:
            mx_input1 = abs(_np.random.uniform()) + 1
            np_input1 = mx_input1
        else:
            mx_input1 = (rand_ndarray(shape1, dtype=dtype).abs() + 1).as_np_ndarray()
            mx_input1.attach_grad()
            np_input1 = mx_input1.asnumpy()
        # Operand 2: same construction as operand 1.
        if shape2 is None:
            mx_input2 = abs(_np.random.uniform()) + 1
            np_input2 = mx_input2
        else:
            mx_input2 = (rand_ndarray(shape2, dtype=dtype).abs() + 1).as_np_ndarray()
            mx_input2.attach_grad()
            np_input2 = mx_input2.asnumpy()
        # If exactly one operand is a scalar, record it and whether the scalar
        # sits on the left (reverse=True) so the __r<op>__ path gets exercised.
        scalar = None
        reverse = False
        if isinstance(mx_input1, mx.nd.NDArray) and not isinstance(mx_input2, mx.nd.NDArray):
            scalar = mx_input2
            reverse = False
        elif isinstance(mx_input2, mx.nd.NDArray) and not isinstance(mx_input1, mx.nd.NDArray):
            scalar = mx_input1
            reverse = True
        # grad_func is None for ops with no registered gradient (e.g. comparisons).
        grad_func = _get_grad_func(op, scalar, reverse)
        np_out = get_np_ret(np_input1, np_input2, op)
        ograd = _np.ones_like(np_out)
        for hybridize in [True, False]:
            if scalar is None:
                # tensor-tensor case
                get_mx_ret_np = TestBinaryElementWiseOp(op)
                get_mx_ret_classic = TestBinaryElementWiseOp(op)
                if hybridize:
                    get_mx_ret_np.hybridize()
                    get_mx_ret_classic.hybridize()
                if grad_func is None:
                    mx_out = get_mx_ret_np(mx_input1, mx_input2)
                else:
                    with mx.autograd.record():
                        mx_out = get_mx_ret_np(mx_input1, mx_input2)
                    mx_out.backward()
                assert type(mx_out) == np.ndarray
                if op in logic_ops:
                    # comparison ops must preserve numpy's result dtype (bool)
                    assert np_out.dtype == mx_out.dtype
                assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-6, rtol=1e-5, use_broadcast=False)
                if grad_func is not None:
                    x1_grad_expected, x2_grad_expected = grad_func(ograd, np_input1, np_input2, np_out)
                    assert_almost_equal(mx_input1.grad.asnumpy(), x1_grad_expected, atol=1e-5, rtol=1e-3,
                                        use_broadcast=False)
                    assert_almost_equal(mx_input2.grad.asnumpy(), x2_grad_expected, atol=1e-5, rtol=1e-3,
                                        use_broadcast=False)
            else:
                # tensor-scalar case (possibly reversed)
                get_mx_ret = TestBinaryElementWiseOp(op, scalar=scalar, reverse=reverse)
                if hybridize:
                    get_mx_ret.hybridize()
                if reverse:
                    mx_input = mx_input2
                else:
                    mx_input = mx_input1
                if grad_func is None:
                    mx_out = get_mx_ret(mx_input)
                else:
                    with mx.autograd.record():
                        mx_out = get_mx_ret(mx_input)
                    mx_out.backward()
                assert type(mx_out) == np.ndarray
                if op in logic_ops:
                    assert np_out.dtype == mx_out.dtype
                assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-6, rtol=1e-5, use_broadcast=False)
                # check grad
                if grad_func is not None:
                    x_grad_expected = grad_func(ograd, np_input1, np_input2, np_out)
                    assert_almost_equal(mx_input.grad.asnumpy(), x_grad_expected, atol=1e-5, rtol=1e-3,
                                        use_broadcast=False)
dtypes = [_np.float32, _np.float64, None]
ops = np_op_map.keys()
for dtype in dtypes:
for op in ops:
check_binary_op_result((3, 4), (3, 4), op, dtype)
check_binary_op_result(None, (3, 4), op, dtype)
check_binary_op_result((3, 4), None, op, dtype)
check_binary_op_result((1, 4), (3, 1), op, dtype)
check_binary_op_result(None, (3, 1), op, dtype)
check_binary_op_result((1, 4), None, op, dtype)
check_binary_op_result((1, 4), (3, 5, 4), op, dtype)
check_binary_op_result((), (3, 5, 4), op, dtype)
check_binary_op_result((), None, op, dtype)
check_binary_op_result(None, (), op, dtype)
check_binary_op_result((0, 2), (1, 1), op, dtype)
check_binary_op_result((0, 2), None, op, dtype)
check_binary_op_result(None, (0, 2), op, dtype)
@with_seed()
def test_np_hybrid_block_multiple_outputs():
    """A HybridBlock must return outputs of one consistent array type:
    all mxnet.numpy ndarrays (under @use_np) or all classic NDArrays;
    mixing the two must raise TypeError."""
    @use_np
    class TestAllNumpyOutputs(HybridBlock):
        def hybrid_forward(self, F, x, *args, **kwargs):
            return F.np.add(x, x), F.np.multiply(x, x)
    class TestAllClassicOutputs(HybridBlock):
        def hybrid_forward(self, F, x, *args, **kwargs):
            return x.as_nd_ndarray() + x.as_nd_ndarray(), x.as_nd_ndarray() * x.as_nd_ndarray()
    data_np = np.ones((2, 3))
    # Both homogeneous blocks should preserve their respective output type,
    # whether hybridized or not.
    for block, expected_out_type in [(TestAllClassicOutputs, mx.nd.NDArray),
                                     (TestAllNumpyOutputs, np.ndarray)]:
        net = block()
        for hybridize in [True, False]:
            if hybridize:
                net.hybridize()
            out1, out2 = net(data_np)
            assert type(out1) is expected_out_type
            assert type(out2) is expected_out_type
    # Mixed output types are rejected in both imperative and hybridized modes.
    @use_np
    class TestMixedTypeOutputsFailure(HybridBlock):
        def hybrid_forward(self, F, x, *args, **kwargs):
            return x.as_nd_ndarray() + x.as_nd_ndarray(), F.np.multiply(x, x)
    net = TestMixedTypeOutputsFailure()
    assert_exception(net, TypeError, data_np)
    net.hybridize()
    assert_exception(net, TypeError, data_np)
@with_seed()
@use_np
def test_np_grad_ndarray_type():
    """The gradient buffer and detached view of an mxnet.numpy scalar
    must themselves be mxnet.numpy ndarrays."""
    scalar = np.array(2, dtype=_np.float32)
    scalar.attach_grad()
    for derived in (scalar.grad, scalar.detach()):
        assert type(derived) == np.ndarray
@with_seed()
@use_np
def test_np_ndarray_astype():
    """Test ndarray.astype against numpy across dtype pairs, the copy flag,
    and hybridized/imperative execution, including zero-copy semantics."""
    class TestAstype(HybridBlock):
        def __init__(self, dtype, copy):
            super(TestAstype, self).__init__()
            self._dtype = dtype
            self._copy = copy

        def hybrid_forward(self, F, x):
            return x.astype(dtype=self._dtype, copy=self._copy)

    def check_astype_equal(itype, otype, copy, hybridize=False):
        # Zero-copy is only possible when copy=False and the dtype is
        # unchanged, and only in imperative mode (hybridization always
        # produces a fresh output array).
        expect_zero_copy = copy is False and itype == otype
        mx_data = np.array([2, 3, 4, 5], dtype=itype)
        np_data = mx_data.asnumpy()
        test_astype = TestAstype(otype, copy)
        if hybridize:
            test_astype.hybridize()
        mx_ret = test_astype(mx_data)
        assert type(mx_ret) is np.ndarray
        np_ret = np_data.astype(dtype=otype, copy=copy)
        assert mx_ret.dtype == np_ret.dtype
        assert same(mx_ret.asnumpy(), np_ret)
        if expect_zero_copy and not hybridize:
            assert id(mx_ret) == id(mx_data)
            assert id(np_ret) == id(np_data)

    dtypes = [np.int8, np.uint8, np.int32, np.float16, np.float32, np.float64, np.bool, np.bool_,
              'int8', 'uint8', 'int32', 'float16', 'float32', 'float64', 'bool']

    for itype, otype in itertools.product(dtypes, dtypes):
        for copy in [True, False]:
            for hybridize in [True, False]:
                # Bug fix: `hybridize` was previously passed positionally into
                # the (dead) `expect_zero_copy` parameter, so the hybridized
                # code path was never actually exercised by this loop.
                check_astype_equal(itype, otype, copy, hybridize=hybridize)
@with_seed()
def test_np_ndarray_copy():
    """ndarray.copy() must reject Fortran order and otherwise match
    numpy's copy exactly."""
    mx_data = np.array([2, 3, 4, 5], dtype=_np.int32)
    assert_exception(mx_data.copy, NotImplementedError, order='F')
    expected = mx_data.asnumpy().copy()
    assert same(mx_data.copy().asnumpy(), expected)
@with_seed()
def test_formatting():
    """str.format on mxnet.numpy ndarrays must match numpy's behavior:
    format specs work on 0-d arrays, raise TypeError on >=1-d arrays,
    and the plain '{}' formatting matches numpy's repr of the data."""
    def test_0d():
        a = np.array(np.pi)
        _a = a.asnumpy()
        assert '{:0.3g}'.format(a) == '{:0.3g}'.format(_a)
        assert '{:0.3g}'.format(a[()]) == '{:0.3g}'.format(_a[()])

    def test_nd_format():
        a = np.array([np.pi])
        assert_exception('{:30}'.format, TypeError, a)

    def test_nd_no_format():
        a = np.array([np.pi])
        _a = a.asnumpy()
        assert '{}'.format(a) == '{}'.format(_a)
        b = np.arange(8).reshape(2, 2, 2)
        # Bug fix: the second comparison previously re-tested `a`, leaving the
        # multi-dimensional array `b` completely unused.
        assert '{}'.format(b) == '{}'.format(b.asnumpy())

    context = mx.context.current_context()
    if str(context)[:3] != 'gpu':
        test_0d()
        test_nd_format()
        test_nd_no_format()
        # If the program is running on GPU, the formatted string is appended
        # with a context notation: for example, if a = np.array([np.pi]), then
        # '{}'.format(a) returns '[3.1415927] @gpu(0)', so these comparisons
        # against plain numpy formatting only hold on CPU.
@with_seed()
@use_np
def test_np_ndarray_indexing():
    """Exhaustive get/set indexing test for mxnet.numpy ndarrays: basic
    (ints, slices, Ellipsis, None), advanced (integer-array/list) indexing,
    autograd through __getitem__/__setitem__, and zero-size edge cases.
    Results are checked against native numpy on the same index."""
    def np_int(index, int_type=np.int32):
        """
        Helper function for testing indexing that converts slices to slices of ints or None, and tuples to
        tuples of ints or None.
        """
        def convert(num):
            if num is None:
                return num
            else:
                return int_type(num)
        if isinstance(index, slice):
            return slice(convert(index.start), convert(index.stop), convert(index.step))
        elif isinstance(index, tuple):  # tuple of slices and integers
            ret = []
            for elem in index:
                if isinstance(elem, slice):
                    ret.append(slice(convert(elem.start), convert(elem.stop), convert(elem.step)))
                else:
                    ret.append(convert(elem))
            return tuple(ret)
        else:
            assert False
    # Copied from test_ndarray.py. Under construction.
    def test_getitem(np_array, index):
        """Compare mx_np_array[index] against np_array[np_index], with and
        without autograd recording."""
        np_index = index
        if type(index) == mx.nd.NDArray:  # use of NDArray is prohibited
            assert False
        if isinstance(index, np.ndarray):
            np_index = index.asnumpy()
        if isinstance(index, tuple):
            np_index = tuple([
                idx.asnumpy() if isinstance(idx, mx.nd.NDArray) else idx
                for idx in index]
            )
        np_indexed_array = np_array[np_index]
        mx_np_array = np.array(np_array, dtype=np_array.dtype)
        for autograd in [True, False]:
            try:
                if autograd:
                    with mx.autograd.record():
                        mx_indexed_array = mx_np_array[index]
                else:
                    mx_indexed_array = mx_np_array[index]
            except Exception as e:
                print('Failed with index = {}'.format(index))
                raise e
            mx_indexed_array = mx_indexed_array.asnumpy()
            assert same(np_indexed_array, mx_indexed_array), 'Failed with index = {}'.format(index)
    def test_setitem(np_array, index):
        """Assign a variety of value types (ndarray, list, scalar, with and
        without broadcasting) through mx_array[index] and compare with numpy."""
        def assert_same(np_array, np_index, mx_array, mx_index, mx_value, np_value=None):
            if np_value is not None:
                np_array[np_index] = np_value
            elif isinstance(mx_value, np.ndarray):
                np_array[np_index] = mx_value.asnumpy()
            else:
                np_array[np_index] = mx_value
            try:
                mx_array[mx_index] = mx_value
            except Exception as e:
                print('Failed with index = {}, value.shape = {}'.format(mx_index, mx_value.shape))
                raise e
            assert same(np_array, mx_array.asnumpy())
        def _is_basic_index(index):
            # basic indexing = ints and slices only (no arrays/lists/None)
            if isinstance(index, (integer_types, py_slice)):
                return True
            if isinstance(index, tuple) and all(isinstance(i, (integer_types, py_slice)) for i in index):
                return True
            return False
        np_index = index  # keep this native numpy type
        if isinstance(index, np.ndarray):
            np_index = index.asnumpy()
        if isinstance(index, tuple):
            np_index = []
            for idx in index:
                if isinstance(idx, np.ndarray):
                    np_index.append(idx.asnumpy())
                else:
                    np_index.append(idx)
            np_index = tuple(np_index)
        mx_array = np.array(np_array, dtype=np_array.dtype)  # mxnet.np.ndarray
        np_array = mx_array.asnumpy()  # native numpy array
        indexed_array_shape = np_array[np_index].shape
        np_indexed_array = _np.random.randint(low=-10000, high=0, size=indexed_array_shape)
        # test value is a native numpy array without broadcast
        assert_same(np_array, np_index, mx_array, index, np_indexed_array)
        # test value is a list without broadcast
        assert_same(np_array, np_index, mx_array, index, np_indexed_array.tolist())
        # test value is a mxnet numpy array without broadcast
        assert_same(np_array, np_index, mx_array, index, np.array(np_indexed_array))
        # test value is an numeric_type
        assert_same(np_array, np_index, mx_array, index, _np.random.randint(low=-10000, high=0))
        np_value = _np.random.randint(low=-10000, high=0,
                                      size=(indexed_array_shape[-1],) if len(indexed_array_shape) > 0 else ())
        # test mxnet ndarray with broadcast
        assert_same(np_array, np_index, mx_array, index, np.array(np_value))
        # test native numpy array with broadcast
        assert_same(np_array, np_index, mx_array, index, np_value)
        # test python list with broadcast
        assert_same(np_array, np_index, mx_array, index, np_value.tolist())
        # test value shape are expanded to be longer than index array's shape
        # this is currently only supported in basic indexing
        if _is_basic_index(index):
            expanded_value_shape = (1, 1) + np_value.shape
            assert_same(np_array, np_index, mx_array, index, np.array(np_value.reshape(expanded_value_shape)))
            assert_same(np_array, np_index, mx_array, index, np_value.reshape(expanded_value_shape))
            if len(expanded_value_shape) <= np_array[index].ndim:
                # NumPy does not allow value.ndim > np_array[index].ndim when value is a python list.
                # It may be a bug of NumPy.
                assert_same(np_array, np_index, mx_array, index, np_value.reshape(expanded_value_shape).tolist())
        # test list with broadcast
        assert_same(np_array, np_index, mx_array, index,
                    [_np.random.randint(low=-10000, high=0)] * indexed_array_shape[-1] if len(indexed_array_shape) > 0
                    else _np.random.randint(low=-10000, high=0))
    def test_getitem_autograd(np_array, index):
        """
        np_array: native numpy array.
        Gradient of y = x[index] w.r.t. x is a one-hot-like mask over the
        selected elements.
        """
        x = np.array(np_array, dtype=np_array.dtype)
        x.attach_grad()
        with mx.autograd.record():
            y = x[index]
        y.backward()
        value = np.ones_like(y)
        x_grad = np.zeros_like(x)
        x_grad[index] = value
        assert same(x_grad.asnumpy(), x.grad.asnumpy())
    def test_setitem_autograd(np_array, index):
        """
        np_array: native numpy array.
        x[index] = y inside autograd.record() either backprops ones into y,
        or raises the documented in-place-operation error.
        """
        x = np.array(np_array, dtype=np_array.dtype)
        out_shape = x[index].shape
        y = np.array(_np.random.uniform(size=out_shape))
        y.attach_grad()
        try:
            with mx.autograd.record():
                x[index] = y
                x.backward()
                y_grad = np.ones_like(y)
                assert same(y_grad.asnumpy(), y.grad.asnumpy())
        except mx.base.MXNetError as err:
            assert str(err).find('Inplace operations (+=, -=, x[:]=, etc) are not supported when recording with') != -1
    shape = (8, 16, 9, 9)
    np_array = _np.arange(_np.prod(_np.array(shape)), dtype='int32').reshape(shape)  # native np array
    # Test sliced output being ndarray:
    index_list = [
        (),
        # Basic indexing
        # Single int as index
        0,
        np.int32(0),
        np.int64(0),
        np.array(0, dtype='int32'),
        np.array(0, dtype='int64'),
        5,
        np.int32(5),
        np.int64(5),
        np.array(5, dtype='int32'),
        np.array(5, dtype='int64'),
        -1,
        np.int32(-1),
        np.int64(-1),
        np.array(-1, dtype='int32'),
        np.array(-1, dtype='int64'),
        # Slicing as index
        slice(5),
        np_int(slice(5), np.int32),
        np_int(slice(5), np.int64),
        slice(1, 5),
        np_int(slice(1, 5), np.int32),
        np_int(slice(1, 5), np.int64),
        slice(1, 5, 2),
        slice(1, 2, 2),
        np_int(slice(1, 5, 2), np.int32),
        np_int(slice(1, 5, 2), np.int64),
        slice(7, 0, -1),
        np_int(slice(7, 0, -1)),
        np_int(slice(7, 0, -1), np.int64),
        slice(None, 6),
        np_int(slice(None, 6)),
        np_int(slice(None, 6), np.int64),
        slice(None, 6, 3),
        np_int(slice(None, 6, 3)),
        np_int(slice(None, 6, 3), np.int64),
        slice(1, None),
        np_int(slice(1, None)),
        np_int(slice(1, None), np.int64),
        slice(1, None, 3),
        np_int(slice(1, None, 3)),
        np_int(slice(1, None, 3), np.int64),
        slice(None, None, 2),
        np_int(slice(None, None, 2)),
        np_int(slice(None, None, 2), np.int64),
        slice(None, None, -1),
        np_int(slice(None, None, -1)),
        np_int(slice(None, None, -1), np.int64),
        slice(None, None, -2),
        np_int(slice(None, None, -2), np.int32),
        np_int(slice(None, None, -2), np.int64),
        # Multiple ints as indices
        (1, 2, 3),
        np_int((1, 2, 3)),
        np_int((1, 2, 3), np.int64),
        (-1, -2, -3),
        np_int((-1, -2, -3)),
        np_int((-1, -2, -3), np.int64),
        (1, 2, 3, 4),
        np_int((1, 2, 3, 4)),
        np_int((1, 2, 3, 4), np.int64),
        (-4, -3, -2, -1),
        (-4, mx.np.array(-3, dtype='int32'), -2, -1),
        (-4, mx.np.array(-3, dtype='int64'), -2, -1),
        np_int((-4, -3, -2, -1)),
        np_int((-4, -3, -2, -1), np.int64),
        # slice(None) as indices
        (slice(None), slice(None), 1, 8),
        (slice(None), slice(None), np.array(1, dtype='int32'), 8),
        (slice(None), slice(None), np.array(1, dtype='int64'), 8),
        (slice(None), slice(None), -1, 8),
        (slice(None), slice(None), 1, -8),
        (slice(None), slice(None), -1, -8),
        np_int((slice(None), slice(None), 1, 8)),
        np_int((slice(None), slice(None), 1, 8), np.int64),
        (slice(None), slice(None), 1, 8),
        np_int((slice(None), slice(None), -1, -8)),
        np_int((slice(None), slice(None), -1, -8), np.int64),
        (slice(None), 2, slice(1, 5), 1),
        np_int((slice(None), 2, slice(1, 5), 1)),
        np_int((slice(None), 2, slice(1, 5), 1), np.int64),
        # Mixture of ints and slices as indices
        (slice(None, None, -1), 2, slice(1, 5), 1),
        np_int((slice(None, None, -1), 2, slice(1, 5), 1)),
        np_int((slice(None, None, -1), 2, slice(1, 5), 1), np.int64),
        (slice(None, None, -1), 2, slice(1, 7, 2), 1),
        np_int((slice(None, None, -1), 2, slice(1, 7, 2), 1)),
        np_int((slice(None, None, -1), 2, slice(1, 7, 2), 1), np.int64),
        (slice(1, 8, 2), slice(14, 2, -2), slice(3, 8), slice(0, 7, 3)),
        np_int((slice(1, 8, 2), slice(14, 2, -2), slice(3, 8), slice(0, 7, 3))),
        np_int((slice(1, 8, 2), slice(14, 2, -2), slice(3, 8), slice(0, 7, 3)), np.int64),
        (slice(1, 8, 2), 1, slice(3, 8), 2),
        np_int((slice(1, 8, 2), 1, slice(3, 8), 2)),
        np_int((slice(1, 8, 2), 1, slice(3, 8), 2), np.int64),
        # Test Ellipsis ('...')
        (1, Ellipsis, -1),
        (slice(2), Ellipsis, None, 0),
        # Test newaxis
        None,
        (1, None, -2, 3, -4),
        (1, slice(2, 5), None),
        (slice(None), slice(1, 4), None, slice(2, 3)),
        (slice(1, 3), slice(1, 3), slice(1, 3), slice(1, 3), None),
        (slice(1, 3), slice(1, 3), None, slice(1, 3), slice(1, 3)),
        (None, slice(1, 2), 3, None),
        (1, None, 2, 3, None, None, 4),
        # Advanced indexing
        ([1, 2], slice(3, 5), None, None, [3, 4]),
        (slice(None), slice(3, 5), None, None, [2, 3], [3, 4]),
        (slice(None), slice(3, 5), None, [2, 3], None, [3, 4]),
        (None, slice(None), slice(3, 5), [2, 3], None, [3, 4]),
        [1],
        [1, 2],
        [2, 1, 3],
        [7, 5, 0, 3, 6, 2, 1],
        np.array([6, 3], dtype=np.int32),
        np.array([[3, 4], [0, 6]], dtype=np.int32),
        np.array([[7, 3], [2, 6], [0, 5], [4, 1]], dtype=np.int32),
        np.array([[7, 3], [2, 6], [0, 5], [4, 1]], dtype=np.int64),
        np.array([[2], [0], [1]], dtype=np.int32),
        np.array([[2], [0], [1]], dtype=np.int64),
        np.array([4, 7], dtype=np.int32),
        np.array([4, 7], dtype=np.int64),
        np.array([[3, 6], [2, 1]], dtype=np.int32),
        np.array([[3, 6], [2, 1]], dtype=np.int64),
        np.array([[7, 3], [2, 6], [0, 5], [4, 1]], dtype=np.int32),
        np.array([[7, 3], [2, 6], [0, 5], [4, 1]], dtype=np.int64),
        (1, [2, 3]),
        (1, [2, 3], np.array([[3], [0]], dtype=np.int32)),
        (1, [2, 3]),
        (1, [2, 3], np.array([[3], [0]], dtype=np.int64)),
        (1, [2], np.array([[5], [3]], dtype=np.int32), slice(None)),
        (1, [2], np.array([[5], [3]], dtype=np.int64), slice(None)),
        (1, [2, 3], np.array([[6], [0]], dtype=np.int32), slice(2, 5)),
        (1, [2, 3], np.array([[6], [0]], dtype=np.int64), slice(2, 5)),
        (1, [2, 3], np.array([[4], [7]], dtype=np.int32), slice(2, 5, 2)),
        (1, [2, 3], np.array([[4], [7]], dtype=np.int64), slice(2, 5, 2)),
        (1, [2], np.array([[3]], dtype=np.int32), slice(None, None, -1)),
        (1, [2], np.array([[3]], dtype=np.int64), slice(None, None, -1)),
        (1, [2], np.array([[3]], dtype=np.int32), np.array([[5, 7], [2, 4]], dtype=np.int64)),
        (1, [2], np.array([[4]], dtype=np.int32), np.array([[1, 3], [5, 7]], dtype='int64')),
        [0],
        [0, 1],
        [1, 2, 3],
        [2, 0, 5, 6],
        ([1, 1], [2, 3]),
        ([1], [4], [5]),
        ([1], [4], [5], [6]),
        ([[1]], [[2]]),
        ([[1]], [[2]], [[3]], [[4]]),
        (slice(0, 2), [[1], [6]], slice(0, 2), slice(0, 5, 2)),
        ([[[[1]]]], [[1]], slice(0, 3), [1, 5]),
        ([[[[1]]]], 3, slice(0, 3), [1, 3]),
        ([[[[1]]]], 3, slice(0, 3), 0),
        ([[[[1]]]], [[2], [12]], slice(0, 3), slice(None)),
        ([1, 2], slice(3, 5), [2, 3], [3, 4]),
        ([1, 2], slice(3, 5), (2, 3), [3, 4]),
        range(4),
        range(3, 0, -1),
        (range(4,), [1]),
        (1, 1, slice(None), 1),
        (1, 1, slice(None, 3), 1),
        (1, 1, slice(None, 8, 3), 1),
    ]
    for index in index_list:
        test_getitem(np_array, index)
        test_setitem(np_array, index)
        test_getitem_autograd(np_array, index)
        test_setitem_autograd(np_array, index)
    # Test indexing to zero-size tensors
    index_list = [
        (slice(0, 0), slice(0, 0), 1, 2),
        (slice(0, 0), slice(0, 0), slice(0, 0), slice(0, 0)),
    ]
    for index in index_list:
        test_getitem(np_array, index)
        test_setitem(np_array, index)
        test_getitem_autograd(np_array, index)
        test_setitem_autograd(np_array, index)
    # test zero-size tensors get and setitem
    shapes_indices = [
        ((0), [slice(None, None, None)]),
        ((3, 0), [2, (slice(None, None, None)), (slice(None, None, None), None)]),
    ]
    for shape, indices in shapes_indices:
        np_array = _np.zeros(shape)
        for index in indices:
            test_getitem(np_array, index)
            test_setitem(np_array, index)
            test_getitem_autograd(np_array, index)
            test_setitem_autograd(np_array, index)
@with_seed()
@use_np
def test_np_save_load_ndarrays():
    """Round-trip mxnet.numpy ndarrays through npx.save/npx.load as a single
    array, a list of arrays, and a dict of str->array, including zero-size
    and zero-dim shapes."""
    shapes = [(2, 0, 1), (0,), (), (), (0, 4), (), (3, 0, 0, 0), (2, 1), (0, 5, 0), (4, 5, 6), (0, 0, 0)]
    array_list = [_np.random.randint(0, 10, size=shape) for shape in shapes]
    array_list = [np.array(arr, dtype=arr.dtype) for arr in array_list]
    # test save/load single ndarray
    for i, arr in enumerate(array_list):
        with TemporaryDirectory() as work_dir:
            fname = os.path.join(work_dir, 'dataset.npy')
            npx.save(fname, arr)
            arr_loaded = npx.load(fname)
            assert isinstance(arr_loaded, list)
            assert len(arr_loaded) == 1
            assert _np.array_equal(arr_loaded[0].asnumpy(), array_list[i].asnumpy())
    # test save/load a list of ndarrays
    with TemporaryDirectory() as work_dir:
        fname = os.path.join(work_dir, 'dataset.npy')
        npx.save(fname, array_list)
        # Bug fix: this previously loaded with mx.nd.load and then asserted on
        # the stale `arr_loaded` variable left over from the loop above, so the
        # list round-trip was never actually validated.
        array_list_loaded = npx.load(fname)
        assert isinstance(array_list_loaded, list)
        assert len(array_list) == len(array_list_loaded)
        assert all(isinstance(arr, np.ndarray) for arr in array_list_loaded)
        for a1, a2 in zip(array_list, array_list_loaded):
            assert _np.array_equal(a1.asnumpy(), a2.asnumpy())
    # test save/load a dict of str->ndarray
    arr_dict = {}
    keys = [str(i) for i in range(len(array_list))]
    for k, v in zip(keys, array_list):
        arr_dict[k] = v
    with TemporaryDirectory() as work_dir:
        fname = os.path.join(work_dir, 'dataset.npy')
        npx.save(fname, arr_dict)
        arr_dict_loaded = npx.load(fname)
        assert isinstance(arr_dict_loaded, dict)
        assert len(arr_dict_loaded) == len(arr_dict)
        for k, v in arr_dict_loaded.items():
            assert k in arr_dict
            assert _np.array_equal(v.asnumpy(), arr_dict[k].asnumpy())
@retry(5)
@with_seed()
@use_np
def test_np_multinomial():
    """Statistical test of np.random.multinomial: sampled frequencies must
    approach pvals; also covers size parameters, zero-size outputs, empty
    pvals, and accumulation of many small experiments."""
    pvals_list = [[0.0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0.0]]
    sizes = [None, (), (3,), (2, 5, 7), (4, 9)]
    experiements = 10000
    for pvals_mx_np_array in [False, True]:
        for have_size in [False, True]:
            for pvals in pvals_list:
                if pvals_mx_np_array:
                    pvals = mx.np.array(pvals)
                if have_size:
                    for size in sizes:
                        freq = mx.np.random.multinomial(experiements, pvals, size=size).asnumpy() / _np.float32(experiements)
                        # for those cases that didn't need reshape
                        if size in [None, ()]:
                            if type(pvals) == np.ndarray:
                                mx.test_utils.assert_almost_equal(freq, pvals.asnumpy(), rtol=0.20, atol=1e-1)
                            else:
                                mx.test_utils.assert_almost_equal(freq, pvals, rtol=0.20, atol=1e-1)
                        else:
                            # check the shape
                            # Bug fix: the failure message previously formatted
                            # `size + (len(pvals))` (tuple + int -> TypeError);
                            # the trailing comma makes it a proper 1-tuple.
                            assert freq.shape == size + (len(pvals),), 'freq.shape={}, size + (len(pvals),)={}'.format(freq.shape, size + (len(pvals),))
                            freq = freq.reshape((-1, len(pvals)))
                            # check the value for each row
                            for i in range(freq.shape[0]):
                                if type(pvals) == np.ndarray:
                                    mx.test_utils.assert_almost_equal(freq[i, :], pvals.asnumpy(), rtol=0.20, atol=1e-1)
                                else:
                                    mx.test_utils.assert_almost_equal(freq[i, :], pvals, rtol=0.20, atol=1e-1)
                else:
                    freq = mx.np.random.multinomial(experiements, pvals).asnumpy() / _np.float32(experiements)
                    if type(pvals) == np.ndarray:
                        mx.test_utils.assert_almost_equal(freq, pvals.asnumpy(), rtol=0.20, atol=1e-1)
                    else:
                        mx.test_utils.assert_almost_equal(freq, pvals, rtol=0.20, atol=1e-1)
    # check the zero dimension
    sizes = [(0), (0, 2), (4, 0, 2), (3, 0, 1, 2, 0)]
    for pvals_mx_np_array in [False, True]:
        for pvals in pvals_list:
            for size in sizes:
                if pvals_mx_np_array:
                    pvals = mx.np.array(pvals)
                freq = mx.np.random.multinomial(experiements, pvals, size=size).asnumpy()
                assert freq.size == 0
    # check [] as pvals
    for pvals_mx_np_array in [False, True]:
        for pvals in [[], ()]:
            if pvals_mx_np_array:
                pvals = mx.np.array(pvals)
            freq = mx.np.random.multinomial(experiements, pvals).asnumpy()
            assert freq.size == 0
            for size in sizes:
                freq = mx.np.random.multinomial(experiements, pvals, size=size).asnumpy()
                assert freq.size == 0
    # test small experiment for github issue
    # https://github.com/apache/incubator-mxnet/issues/15383
    small_exp, total_exp = 20, 10000
    for pvals_mx_np_array in [False, True]:
        for pvals in pvals_list:
            if pvals_mx_np_array:
                pvals = mx.np.array(pvals)
            x = np.random.multinomial(small_exp, pvals)
            for i in range(total_exp // small_exp):
                x = x + np.random.multinomial(20, pvals)
            freq = (x.asnumpy() / _np.float32(total_exp)).reshape((-1, len(pvals)))
            for i in range(freq.shape[0]):
                if type(pvals) == np.ndarray:
                    mx.test_utils.assert_almost_equal(freq[i, :], pvals.asnumpy(), rtol=0.20, atol=1e-1)
                else:
                    mx.test_utils.assert_almost_equal(freq[i, :], pvals, rtol=0.20, atol=1e-1)
@with_seed()
@unittest.skipUnless(is_op_runnable(), "Comparison ops can only run on either CPU instances, or GPU instances with"
                                       " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON")
@use_np
def test_np_ndarray_boolean_indexing():
    """Boolean-mask indexing tests adapted from numpy's test_indexing.py:
    scalar masks, shape-mismatch errors, mixed bool/int tuples, boolean
    assignment, and autograd through a boolean getitem."""
    def test_single_bool_index():
        # adapted from numpy's test_indexing.py
        # Single boolean index
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]], dtype=np.int32)
        assert same(a[np.array(True, dtype=np.bool_)].asnumpy(), a[None].asnumpy())
        assert same(a[np.array(False, dtype=np.bool_)].asnumpy(), a[None][0:0].asnumpy())

    def test_boolean_catch_exception():
        # adapted from numpy's test_indexing.py
        arr = np.ones((5, 4, 3))

        index = np.array([True], dtype=np.bool_)
        assert_exception(arr.__getitem__, IndexError, index)

        index = np.array([False] * 6, dtype=np.bool_)
        assert_exception(arr.__getitem__, IndexError, index)

        index = np.zeros((4, 4), dtype=bool)
        assert_exception(arr.__getitem__, IndexError, index)

    def test_boolean_indexing_onedim():
        # adapted from numpy's test_indexing.py
        # Indexing a 2-dimensional array with
        # boolean array of length one
        a = np.array([[0., 0., 0.]])
        b = np.array([True], dtype=bool)
        assert same(a[b].asnumpy(), a.asnumpy())

    def test_boolean_indexing_twodim():
        # adapted from numpy's test_indexing.py
        # Indexing a 2-dimensional array with
        # 2-dimensional boolean array
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]], dtype=np.int32)
        b = np.array([[ True, False, True],
                      [False, True, False],
                      [ True, False, True]], dtype=np.bool_)
        assert same(a[b].asnumpy(), _np.array([1, 3, 5, 7, 9], dtype=a.dtype))
        assert same(a[b[1]].asnumpy(), _np.array([[4, 5, 6]], dtype=a.dtype))
        assert same(a[b[0]].asnumpy(), a[b[2]].asnumpy())

    def test_boolean_indexing_list():
        # adapted from numpy's test_indexing.py
        a = np.array([1, 2, 3], dtype=np.int32)
        b = [True, False, True]
        # Two variants of the test because the first takes a fast path
        assert same(a[b].asnumpy(), _np.array([1, 3], dtype=a.dtype))
        # Bug fix: this second variant was previously a bare tuple expression
        # `(a[None, b], [[1, 3]])` that asserted nothing.
        assert same(a[None, b].asnumpy(), _np.array([[1, 3]], dtype=a.dtype))

    def test_boolean_indexing_tuple():
        # case arr[:, mask, :] and arr[1, mask, 0]
        # when a boolean array is in a tuple
        a = np.array([[[0, 1],
                       [2, 3]],
                      [[4, 5],
                       [6, 7]]], dtype=np.int32)
        b = np.array([[False,True],
                      [True,False]],dtype=np.bool)
        _np_a = a.asnumpy()
        _np_b = b.asnumpy()
        assert same(a[:, b].asnumpy(), _np_a[:, _np_b])
        assert same(a[b, :].asnumpy(), _np_a[_np_b, :])
        assert same(a[0, b].asnumpy(), _np_a[0, _np_b])
        assert same(a[b, 1].asnumpy(), _np_a[_np_b, 1])

    def test_boolean_indexing_assign():
        # test boolean indexing assign
        shape = (3, 2, 3)
        mx_data = np.random.uniform(size=shape)
        mx_mask = np.array([[False,True], [True,False], [True,False]],dtype=np.bool)
        np_data = mx_data.asnumpy()
        np_mask = mx_mask.asnumpy()

        np_data[np_data>0.5] = 0
        mx_data[mx_data>0.5] = 0
        assert_almost_equal(mx_data.asnumpy(), np_data, rtol=1e-3, atol=1e-5, use_broadcast=False)
        np_data[np_mask] = 1
        mx_data[mx_mask] = 1
        assert_almost_equal(mx_data.asnumpy(), np_data, rtol=1e-3, atol=1e-5, use_broadcast=False)
        # not supported at this moment
        # only support boolean array at the end of the idces when it is mixed with integers
        # np_data[np_mask, 1] = 2
        # mx_data[mx_mask, 1] = 2
        # assert_almost_equal(mx_data.asnumpy(), np_data, rtol=1e-3, atol=1e-5, use_broadcast=False)
        np_data[np_mask, :] = 3
        mx_data[mx_mask, :] = 3
        assert_almost_equal(mx_data.asnumpy(), np_data, rtol=1e-3, atol=1e-5, use_broadcast=False)

        mx_mask = np.array([[False,True, True],[False, True,False]],dtype=np.bool)
        np_mask = mx_mask.asnumpy()

        np_data[0, np_mask] = 5
        mx_data[0, mx_mask] = 5
        assert_almost_equal(mx_data.asnumpy(), np_data, rtol=1e-3, atol=1e-5, use_broadcast=False)
        np_data[:, np_mask] = 6
        mx_data[:, mx_mask] = 6
        assert_almost_equal(mx_data.asnumpy(), np_data, rtol=1e-3, atol=1e-5, use_broadcast=False)

    def test_boolean_indexing_autograd():
        a = np.random.uniform(size=(3, 4, 5))
        a.attach_grad()
        with mx.autograd.record():
            out_mx = a[a < 0.5]
        out_mx.backward()

        a_np = a.asnumpy()
        out_np = a_np[a_np < 0.5]
        assert_almost_equal(out_mx.asnumpy(), out_np, rtol=1e-4, atol=1e-5, use_broadcast=False)

        a_grad_np = _np.zeros(a.shape, dtype=a.dtype)
        a_grad_np[a_np < 0.5] = 1
        assert_almost_equal(a.grad.asnumpy(), a_grad_np, rtol=1e-4, atol=1e-5, use_broadcast=False)

    test_single_bool_index()
    test_boolean_catch_exception()
    test_boolean_indexing_onedim()
    test_boolean_indexing_twodim()
    test_boolean_indexing_list()
    test_boolean_indexing_tuple()
    test_boolean_indexing_assign()
    test_boolean_indexing_autograd()
@with_seed()
@use_np
def test_np_get_dtype():
    """The dtype attribute of mxnet.numpy ndarrays must have the same type
    as numpy's for every supported dtype spec (classes, strings, None)."""
    dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, _np.bool, _np.bool_,
              'int8', 'int32', 'float16', 'float32', 'float64', 'bool', None]
    objects = [
        [],
        (),
        [[1, 2], [3, 4]],
        _np.random.uniform(size=rand_shape_nd(3)),
        _np.random.uniform(size=(3, 0, 4))
    ]
    # itertools.product preserves the original nested-loop order
    # (outer loop over dtypes, inner over source objects).
    for dtype, src in itertools.product(dtypes, objects):
        mx_arr = np.array(src, dtype=dtype)
        assert mx_arr.ctx == mx.current_context()
        # None defaults to float32 on both sides
        fallback = dtype if dtype is not None else _np.float32
        if isinstance(src, mx.nd.NDArray):
            np_arr = _np.array(src.asnumpy(), dtype=fallback)
        else:
            np_arr = _np.array(src, dtype=fallback)
        assert type(mx_arr.dtype) == type(np_arr.dtype)
@use_np
def test_np_ndarray_pickle():
    """Round-trip an mxnet.numpy ndarray through pickle and verify the
    loaded data matches the original."""
    a = np.random.uniform(size=(4, 5))
    a_copy = a.copy()
    import pickle

    with TemporaryDirectory() as work_dir:
        fname = os.path.join(work_dir, 'np_ndarray_pickle_test_file')
        with open(fname, 'wb') as f:
            pickle.dump(a_copy, f)
        with open(fname, 'rb') as f:
            a_load = pickle.load(f)
        # Bug fix: the result of same() was previously discarded, so the
        # round-trip was never actually checked.
        assert same(a.asnumpy(), a_load.asnumpy())
if __name__ == '__main__':
    # Run all tests in this module under nose when executed directly.
    import nose
    nose.runmodule()
| 41.158594 | 150 | 0.544122 |
e48539d9142d7f5eaab6e059f60db700303a7cd7 | 19,913 | py | Python | super_resolution_utilty.py | chien-he/DRCN_tf | bdf21a59e5ffe878ba1e326e364c63ee67eb507a | [
"Apache-2.0"
] | 11 | 2018-04-25T12:36:37.000Z | 2020-06-04T08:01:27.000Z | super_resolution_utilty.py | chien-he/DRCN_tf | bdf21a59e5ffe878ba1e326e364c63ee67eb507a | [
"Apache-2.0"
] | 1 | 2019-04-28T06:01:19.000Z | 2019-04-28T06:01:19.000Z | super_resolution_utilty.py | chenhe82166/DRCN_tf | bdf21a59e5ffe878ba1e326e364c63ee67eb507a | [
"Apache-2.0"
] | 5 | 2017-12-30T06:27:54.000Z | 2019-07-24T21:04:15.000Z | # coding=utf8
from __future__ import division
import datetime
import math
import os
import shutil
from os import listdir
from os.path import isfile, join
import numpy as np
import tensorflow as tf
from PIL import Image
from scipy import misc
# utilities for save / load
# Known evaluation datasets: key -> [directory name, start index, image count].
# (Presumably the two ints select a contiguous range of files within the
# directory -- TODO confirm against the loader that consumes this table.)
test_datasets = {
    "set5": ["Set5", 0, 5],
    "set14": ["Set14", 0, 14],
    "bsd100": ["BSD100", 0, 100],
    "urban100": ["Urban100", 0, 100],
    "test": ["Set5", 0, 1]
}
class LoadError(Exception):
    """Raised when an image or dataset file cannot be loaded."""

    def __init__(self, message):
        # Bug fix: forward the message to Exception.__init__ so that
        # str(exc) and exc.args are populated (they were empty before).
        super(LoadError, self).__init__(message)
        self.message = message
def make_dir(directory):
    """Create *directory* (including parents) if it does not already exist.

    Bug fix: the original check-then-create pattern raced with concurrent
    creators; os.makedirs could raise if another process created the
    directory between the exists() check and the call.
    """
    if not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError:
            # Another process may have created it in the meantime; only
            # re-raise if the directory still does not exist.
            if not os.path.isdir(directory):
                raise
def get_files_in_directory(path):
    """Return the paths of regular files directly under *path*.

    Note: entries are produced as ``path + name``, so *path* is expected to
    end with a separator (matching the original behavior).
    """
    files = []
    for name in listdir(path):
        if isfile(join(path, name)):
            files.append(path + name)
    return files
def remove_generic(path, __func__):
    """Apply removal function *__func__* (e.g. os.remove / os.rmdir) to
    *path*, printing and swallowing any OSError (best-effort cleanup)."""
    try:
        __func__(path)
    except OSError as error:
        print("OS error: {0}".format(error))
def clean_dir(path):
    """Recursively delete everything inside *path*; the top directory itself
    is kept. A non-directory (or missing) *path* is silently ignored."""
    if not os.path.isdir(path):
        return
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if os.path.isfile(full_path):
            remove_generic(full_path, os.remove)
        elif os.path.isdir(full_path):
            # empty the subdirectory first, then remove it
            clean_dir(full_path)
            remove_generic(full_path, os.rmdir)
def save_image(filename, image):
    """Save *image* (H x W or H x W x 1 ndarray, values in [0, 255]) to
    *filename*, creating parent directories as needed."""
    # Squeeze a trailing single channel so the image is saved as grayscale.
    if len(image.shape) >= 3 and image.shape[2] == 1:
        image = image.reshape(image.shape[0], image.shape[1])

    directory = os.path.dirname(filename)
    if directory != "" and not os.path.exists(directory):
        os.makedirs(directory)

    # NOTE(review): scipy.misc.toimage/imsave were removed in SciPy >= 1.2,
    # so this only works with an old SciPy (+ Pillow). Consider porting to
    # PIL.Image.fromarray directly.
    image = misc.toimage(image, cmin=0, cmax=255)  # to avoid range rescaling
    misc.imsave(filename, image)
    print("Saved [%s]" % filename)
# def save_image_data(filename, image):
# directory = os.path.dirname(filename)
# if directory != "" and not os.path.exists(directory):
# os.makedirs(directory)
# np.save(filename, image)
# print("Saved [%s]" % filename)
# if len(image.shape) == 3 and image.shape[2] == 1:
# image = image.reshape(image.shape[0], image.shape[1])
# misc.imsave(filename, image)
# def convert_rgb_to_y(image, jpeg_mode=True, max_value=255.0):
# if len(image.shape) <= 2 or image.shape[2] == 1:
# return image
# if jpeg_mode:
# xform = np.array([[0.299, 0.587, 0.114]])
# y_image = image.dot(xform.T)
# else:
# xform = np.array([[65.481 / 256.0, 128.553 / 256.0, 24.966 / 256.0]])
# y_image = image.dot(xform.T) + (16.0 * max_value / 256.0)
# return y_image
def convert_rgb_to_ycbcr(image, jpeg_mode=True, max_value=255):
    """Convert an RGB image (H x W x 3 ndarray) to YCbCr.

    jpeg_mode=True uses the JFIF full-range matrix; otherwise the ITU-R
    BT.601 "studio swing" matrix with the 16/128 offsets is used.
    Grayscale inputs (1-D, 2-D, or H x W x 1) are returned unchanged.
    """
    # Bug fix: the guard was `len(image.shape) < 2`, so a plain 2-D grayscale
    # array fell through and raised IndexError on image.shape[2].
    if len(image.shape) <= 2 or image.shape[2] == 1:
        return image

    if jpeg_mode:
        xform = np.array([[0.299, 0.587, 0.114], [-0.169, - 0.331, 0.500], [0.500, - 0.419, - 0.081]])
        ycbcr_image = image.dot(xform.T)
        # center the chroma channels around max_value / 2
        ycbcr_image[:, :, [1, 2]] += max_value / 2
    else:
        xform = np.array(
            [[65.481 / 256.0, 128.553 / 256.0, 24.966 / 256.0], [- 37.945 / 256.0, - 74.494 / 256.0, 112.439 / 256.0],
             [112.439 / 256.0, - 94.154 / 256.0, - 18.285 / 256.0]])
        ycbcr_image = image.dot(xform.T)
        ycbcr_image[:, :, 0] += (16.0 * max_value / 256.0)
        ycbcr_image[:, :, [1, 2]] += (128.0 * max_value / 256.0)

    return ycbcr_image
def convert_y_and_cbcr_to_rgb(y_image, cbcr_image, jpeg_mode=True, max_value=255.0):  # recombine Y with CbCr and convert back to RGB
    """Merge a Y channel with a CbCr image and convert the result to RGB.

    ``y_image`` may be 2-D, H x W x 1, or H x W x 3 (only channel 0 is used);
    ``cbcr_image`` supplies channels Cb and Cr (its first two channels).
    """
    if len(y_image.shape) <= 2:
        # Bug fix: reshape is a method; the original indexed it with square
        # brackets (y_image.reshape[...]), which raises TypeError at runtime.
        y_image = y_image.reshape(y_image.shape[0], y_image.shape[1], 1)
    if len(y_image.shape) == 3 and y_image.shape[2] == 3:
        y_image = y_image[:, :, 0:1]
    ycbcr_image = np.zeros([y_image.shape[0], y_image.shape[1], 3])
    ycbcr_image[:, :, 0] = y_image[:, :, 0]
    ycbcr_image[:, :, 1:3] = cbcr_image[:, :, 0:2]
    return convert_ycbcr_to_rgb(ycbcr_image, jpeg_mode=jpeg_mode, max_value=max_value)
def convert_ycbcr_to_rgb(ycbcr_image, jpeg_mode=True, max_value=255.0):
    """Convert a YCbCr image back to RGB (inverse of convert_rgb_to_ycbcr)."""
    rgb_image = np.zeros([ycbcr_image.shape[0], ycbcr_image.shape[1], 3])  # type: np.ndarray
    if jpeg_mode:
        # Bug fix: the Y channel was never copied into rgb_image, so the
        # JPEG-mode inverse transform operated on Y == 0 and produced a
        # nearly black image.  Copy it before applying the matrix.
        rgb_image[:, :, 0] = ycbcr_image[:, :, 0]
        rgb_image[:, :, [1, 2]] = ycbcr_image[:, :, [1, 2]] - (128.0 * max_value / 256.0)
        xform = np.array([[1, 0, 1.402], [1, - 0.344, - 0.714], [1, 1.772, 0]])
        rgb_image = rgb_image.dot(xform.T)
    else:
        rgb_image[:, :, 0] = ycbcr_image[:, :, 0] - (16.0 * max_value / 256.0)
        rgb_image[:, :, [1, 2]] = ycbcr_image[:, :, [1, 2]] - (128.0 * max_value / 256.0)
        xform = np.array(
            [[max_value / 219.0, 0, max_value * 0.701 / 112.0],
             [max_value / 219, - max_value * 0.886 * 0.114 / (112 * 0.587), - max_value * 0.701 * 0.299 / (112 * 0.587)],
             [max_value / 219.0, max_value * 0.886 / 112.0, 0]])
        rgb_image = rgb_image.dot(xform.T)
    return rgb_image
def set_image_alignment(image, alignment):  # align image dimensions
    """Crop ``image`` so both spatial dimensions are multiples of ``alignment``.

    Returns the input unchanged when it is already aligned (e.g. when the
    super-resolution scale factor of 2 or 4 already divides both sides).
    """
    alignment = int(alignment)
    rows, cols = image.shape[0], image.shape[1]
    aligned_rows = rows - rows % alignment
    aligned_cols = cols - cols % alignment
    if (rows, cols) == (aligned_rows, aligned_cols):
        return image
    return image[:aligned_rows, :aligned_cols, :]
# def resize_image_by_bicubic(image, scale):
# size = [int(image.shape[0] * scale), int(image.shape[1] * scale)]
# image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])
# tf_image = tf.image.resize_bicubic(image, size=size)
# image = tf_image.eval()
# return image.reshape(image.shape[1], image.shape[2], image.shape[3])
def resize_image_by_pil_bicubic(image, scale):
    """Resize ``image`` by factor ``scale`` using PIL's bicubic filter.

    RGB input stays H x W x 3; anything else is treated as a single-channel
    image and comes back as H x W x 1.
    """
    height, width = image.shape[0], image.shape[1]
    target_width = int(width * scale)
    target_height = int(height * scale)
    is_rgb = len(image.shape) == 3 and image.shape[2] == 3
    if is_rgb:
        pil_image = Image.fromarray(image, "RGB")
    else:
        pil_image = Image.fromarray(image.reshape(height, width))
    pil_image = pil_image.resize([target_width, target_height], resample=Image.BICUBIC)
    resized = np.asarray(pil_image)
    if not is_rgb:
        resized = resized.reshape(target_height, target_width, 1)
    return resized
def load_image(filename, width=0, height=0, channels=0, alignment=0):  # load an image from disk
    """Load an image and validate it against the requested attributes.

    Raises:
        LoadError: when the file is missing or any non-zero width/height/
            channels/alignment constraint is not satisfied.
    """
    if not os.path.isfile(filename):
        raise LoadError("File not found [%s]" % filename)
    # Fix: scipy.misc.imread was deprecated and removed in SciPy >= 1.2;
    # read through PIL instead (same H x W [x C] uint8 layout).
    image = np.asarray(Image.open(filename))
    if len(image.shape) == 2:
        image = image.reshape(image.shape[0], image.shape[1], 1)
    if (width != 0 and image.shape[1] != width) or (height != 0 and image.shape[0] != height):
        raise LoadError("Attributes mismatch")
    if channels != 0 and image.shape[2] != channels:
        raise LoadError("Attributes mismatch")
    # NOTE(review): this checks the *requested* width/height against the
    # alignment (not the actual image size), so it is a no-op when
    # width/height are left at their defaults -- preserved as-is.
    if alignment != 0 and ((width % alignment) != 0 or (height % alignment) != 0):
        raise LoadError("Attributes mismatch")
    print("Loaded [%s]: %d x %d x %d" % (filename, image.shape[1], image.shape[0], image.shape[2]))
    return image
# def load_image_data(filename, width=0, height=0, channels=0, alignment=0):
# if not os.path.isfile(filename + ".npy"):
# raise LoadError("File not found")
# image = np.load(filename + ".npy")
# if (width != 0 and image.shape[1] != width) or (height != 0 and image.shape[0] != height):
# raise LoadError("Attributes mismatch")
# if channels != 0 and image.shape[2] != channels:
# raise LoadError("Attributes mismatch")
# if alignment != 0 and ((width % alignment) != 0 or (height % alignment) != 0):
# raise LoadError("Attributes mismatch")
# print("Cache Loaded [%s]: %d x %d x %d" % (filename, image.shape[1], image.shape[0], image.shape[2]))
# return image
def load_input_image(filename, width=0, height=0, channels=1, scale=1, alignment=0,
                     convert_ycbcr=True, jpeg_mode=False, rescale=True):  # load a network input image
    """Load an image from disk and preprocess it with build_input_image.

    All keyword arguments are forwarded unchanged; see build_input_image
    for the crop/align/scale/YCbCr pipeline they control.
    """
    image = load_image(filename)
    return build_input_image(image, width, height, channels, scale, alignment,
                             convert_ycbcr, jpeg_mode, rescale)
def build_input_image(image, width=0, height=0, channels=1, scale=1, alignment=0,
                      convert_ycbcr=True, jpeg_mode=False, rescale=True):  # build the network input image (converted to YCbCr)
    """Preprocess ``image`` into a network input.

    The steps run in a fixed, order-sensitive sequence: center-crop to
    width x height (when both given), crop to an ``alignment`` multiple,
    bicubic downscale by 1/``scale`` (optionally upscaled back when
    ``rescale``), convert RGB -> YCbCr, then keep only the Y channel when
    ``channels`` == 1.
    """
    if width != 0 and height != 0:
        if image.shape[0] != height or image.shape[1] != width:
            # Center crop to the requested size.
            x = (image.shape[1] - width) // 2
            y = (image.shape[0] - height) // 2
            image = image[y: y + height, x: x + width, :]
    if alignment > 1:
        image = set_image_alignment(image, alignment)  # trim so dims divide the alignment factor
    if scale != 1:
        # Degrade: downscale by the super-resolution factor...
        image = resize_image_by_pil_bicubic(image, 1.0 / scale)
        if rescale:
            # ...and optionally bicubic-upscale back to the original size.
            image = resize_image_by_pil_bicubic(image, scale)
    if convert_ycbcr:
        image = convert_rgb_to_ycbcr(image, jpeg_mode=jpeg_mode)  # convert to YCbCr
    if channels == 1 and image.shape[2] > 1:
        image = image[:, :, 0:1].copy()  # use copy() since after the step we use stride_tricks.as_strided().
    return image
def load_input_image_with_cache(cache_dir, org_filename, channels=1, scale=1, alignment=0,
                                convert_ycbcr=True, jpeg_mode=False, rescale=True):  # load an input image through a disk cache
    """Load a preprocessed input image, caching the result under ``cache_dir``.

    The cache filename encodes the scale ("_<scale>") and whether only the
    Y channel is kept ("_Y").  On a cache miss the image is built with
    load_input_image and written back to the cache.
    """
    # Bug fix: the original tested `cache_dir is ""` -- identity comparison
    # against a str literal is implementation-dependent (and a SyntaxWarning
    # on CPython >= 3.8).  Treat None and "" uniformly as "no cache".
    if not cache_dir:
        return load_input_image(org_filename, channels=channels, scale=scale, alignment=alignment,
                                convert_ycbcr=convert_ycbcr, jpeg_mode=jpeg_mode, rescale=rescale)
    filename, extension = os.path.splitext(org_filename)
    if filename.startswith("../"):
        filename = filename[len("../"):]
    if scale != 1.0:
        filename += "_%1.0f" % scale
    if channels == 1:
        filename += "_Y"
    cache_filename = cache_dir + "/" + filename + extension
    try:
        image = load_image(cache_filename, channels=channels)
    except LoadError:
        # Cache miss: build the input image and store it for next time.
        image = load_input_image(org_filename, channels=channels, scale=scale, alignment=alignment,
                                 convert_ycbcr=convert_ycbcr, jpeg_mode=jpeg_mode, rescale=rescale)
        save_image(cache_filename, image)
    return image
def get_split_images(image, window_size, stride=None):
    """Split a 2-D (or H x W x 1) image into window_size x window_size patches.

    Builds a zero-copy patch view with numpy stride tricks, then reshapes it
    to (num_patches, window_size, window_size, 1).  Defaults to a
    non-overlapping tiling (stride == window_size).
    """
    if len(image.shape) == 3 and image.shape[2] == 1:
        image = image.reshape(image.shape[0], image.shape[1])
    window_size = int(window_size)
    size = image.itemsize  # byte size of each value
    height, width = image.shape
    if stride is None:
        stride = window_size
    else:
        stride = int(stride)
    # Number of windows that fit along each axis.
    new_height = 1 + (height - window_size) // stride
    new_width = 1 + (width - window_size) // stride
    shape = (new_height, new_width, window_size, window_size)
    # Byte strides: (window row step, window col step, pixel row, pixel col).
    strides = size * np.array([width * stride, stride, width, 1])
    windows = np.lib.stride_tricks.as_strided(image, shape=shape, strides=strides)
    # reshape copies the strided view into a contiguous patch batch.
    windows = windows.reshape(windows.shape[0] * windows.shape[1], windows.shape[2], windows.shape[3], 1)
    return windows
# utilities for building graphs
def conv2d(x, w, stride, name=""):
    """2-D convolution with SAME padding over NHWC input.

    Bug fix: for the default NHWC data format the strides argument must be
    [1, stride, stride, 1] (batch and channel strides of 1), as the usage
    notes below this function state.  The original passed
    [stride, stride, 1, 1], which strides over the batch dimension instead
    of the spatial dimensions whenever stride != 1.
    """
    return tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding="SAME", name=name + "_conv")
# tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, data_format=None, name=None)
# data_format: layout of the input data, either "NHWC" or "NCHW"; the default is "NHWC".
# "NHWC" means the input is laid out as [batch, in_height, in_width, in_channels].
# "NCHW" means the input is laid out as [batch, in_channels, in_height, in_width].
# use_cudnn_on_gpu: whether to use the GPU (cuDNN); defaults to True when a GPU is available.
# input: a 4-D tensor in the format described above.
# filter: a 4-D list/tensor [height, width, in_channels, out_channels], i.e. the filter map size plus the number of input and output feature maps involved.
# strides: a length-4 list of step sizes; for NHWC it is [1, stride, stride, 1] -- the first entry moves across samples, the middle two are the filter's strides over the feature map, and the last moves across channels within a sample.
# padding: "SAME" pads so the output size matches the input; "VALID" does no padding, so the output is smaller than the input.
def conv2d_with_bias(x, w, stride, bias, add_relu=False, name=""):
    """Convolution (via conv2d) followed by a bias add and optional ReLU."""
    biased = tf.add(conv2d(x, w, stride, name), bias, name=name + "_add")
    if not add_relu:
        return biased
    return tf.nn.relu(biased, name=name + "_relu")
# def dilated_conv2d_with_bias(x, w, stride, bias, add_relu=False, name=""):
# conv = tf.nn.atrous_conv2d(x, w, 2, padding="SAME", name=name + "_conv")
# if add_relu:
# return tf.nn.relu(tf.add(conv, bias, name=name + "_add"), name=name + "_relu")
# else:
# return tf.add(conv, bias, name=name + "_add")
def xavier_cnn_initializer(shape, uniform=True, name=None):
    """Xavier/Glorot initializer for a conv kernel shaped [kh, kw, in_ch, out_ch].

    Scales the random values by fan_in + fan_out so activation variance is
    preserved; uniform or truncated-normal sampling is selectable.
    """
    fan_in = shape[0] * shape[1] * shape[2]
    fan_out = shape[0] * shape[1] * shape[3]
    n = fan_in + fan_out
    if uniform:
        init_range = math.sqrt(6.0 / n)
        return tf.random_uniform(shape, minval=-init_range, maxval=init_range, name=name)  # uniform random values in [minval, maxval]
    else:
        stddev = math.sqrt(3.0 / n)
        return tf.truncated_normal(shape=shape, stddev=stddev, name=name)
def he_initializer(shape, name=None):
    """He (MSRA) initializer: truncated normal with stddev sqrt(2 / fan_in)."""
    n = shape[0] * shape[1] * shape[2]
    stddev = math.sqrt(2.0 / n)
    return tf.truncated_normal(shape=shape, stddev=stddev, name=name)  # tf.truncated_normal(shape, mean, stddev): shape is the output shape, mean the average, stddev the standard deviation.
# Samples a truncated normal distribution with the chosen mean and stddev.
def weight(shape, stddev=0.01, name=None, uniform=False, initializer="xavier"):
    """Create a weight tf.Variable initialised per ``initializer``.

    Supported initializers: "xavier", "he", "uniform", "stddev", and
    "diagonal" (identity-like kernel).  Any other value falls back to zeros.
    """
    if initializer == "xavier":
        initial = xavier_cnn_initializer(shape, uniform=uniform, name=name)
    elif initializer == "he":
        initial = he_initializer(shape, name=name)
    elif initializer == "uniform":
        initial = tf.random_uniform(shape, minval=-2.0 * stddev, maxval=2.0 * stddev)
    elif initializer == "stddev":
        initial = tf.truncated_normal(shape=shape, stddev=stddev)
    elif initializer == "diagonal":
        initial = tf.truncated_normal(shape=shape, stddev=stddev)
        if len(shape) == 4:
            # Materialise the random tensor (requires an active default TF
            # session for .eval()) and place 1.0 at the kernel centre on
            # matching in/out channel pairs -> an identity-passthrough kernel.
            initial = initial.eval()
            i = shape[0] // 2
            j = shape[1] // 2
            for k in range(min(shape[2], shape[3])):
                initial[i][j][k][k] = 1.0
    else:
        initial = tf.zeros(shape)
    return tf.Variable(initial, name=name)
def bias(shape, initial_value=0.0, name=None):
    """Create a bias tf.Variable filled with ``initial_value``."""
    initial = tf.constant(initial_value, shape=shape)
    kwargs = {} if name is None else {"name": name}
    return tf.Variable(initial, **kwargs)
# utilities for logging -----
def add_summaries(scope_name, model_name, var, stddev=True, mean=False, max=False, min=False):
    """Attach TensorBoard scalar and histogram summaries for ``var``.

    NOTE(review): the "stddev" scalar is sqrt(sum of squared deviations),
    i.e. it is not divided by the element count, so it is not a true
    standard deviation -- confirm whether that is intentional.
    """
    with tf.name_scope(scope_name):
        mean_var = tf.reduce_mean(var)
        if mean:
            tf.summary.scalar("mean/" + model_name, mean_var)
        if stddev:
            stddev_var = tf.sqrt(tf.reduce_sum(tf.square(var - mean_var)))
            tf.summary.scalar("stddev/" + model_name, stddev_var)
        if max:
            tf.summary.scalar("max/" + model_name, tf.reduce_max(var))
        if min:
            tf.summary.scalar("min/" + model_name, tf.reduce_min(var))
        tf.summary.histogram(model_name, var)
def get_now_date():  # current timestamp string
    """Return the current local time as "YYYY/MM/DD HH:MM:SS".

    Bug fix: the original interpolated the raw datetime components with
    "%s", so single-digit values were not zero padded (e.g. "2020/1/5
    9:5:3").  strftime yields a fixed-width, sortable timestamp.
    """
    return datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S")
def get_loss_image(image1, image2, scale=1.0, border_size=0):
    """Return a per-pixel squared-error image, scaled and clipped to 255.

    Returns None when the two images have different shapes.  uint8 input is
    promoted to double before subtraction to avoid unsigned wrap-around.
    """
    if len(image1.shape) == 2:
        image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)
    if len(image2.shape) == 2:
        image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)
    if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:
        return None
    if image1.dtype == np.uint8:
        image1 = image1.astype(np.double)
    if image2.dtype == np.uint8:
        image2 = image2.astype(np.double)
    loss_image = np.multiply(np.square(np.subtract(image1, image2)), scale)
    loss_image = np.minimum(loss_image, 255.0)
    # Bug fix: with border_size == 0 the original slice [0:-0] produced an
    # empty array; only trim when there is actually a border to remove.
    if border_size > 0:
        loss_image = loss_image[border_size:-border_size, border_size:-border_size, :]
    return loss_image
def compute_mse(image1, image2, border_size=0):  # mean squared error between two images
    """Compute the MSE between two images, optionally ignoring a border.

    Returns None when the shapes differ.  The original triple Python loop
    is replaced by an equivalent vectorised NumPy computation (promoting to
    double exactly as before).
    """
    if len(image1.shape) == 2:
        image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)
    if len(image2.shape) == 2:
        image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)
    if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:
        return None
    diff = image1.astype(np.double) - image2.astype(np.double)
    height, width = diff.shape[0], diff.shape[1]
    region = diff[border_size:height - border_size, border_size:width - border_size, :]
    return float(np.mean(np.square(region)))
# def print_CNN_weight(tensor):
# print("Tensor[%s] shape=%s" % (tensor.name, str(tensor.get_shape())))
# weight = tensor.eval()
# for i in range(weight.shape[3]):
# values = ""
# for x in range(weight.shape[0]):
# for y in range(weight.shape[1]):
# for c in range(weight.shape[2]):
# values += "%2.3f " % weight[y][x][c][i]
# print(values)
# print("\n")
# def print_CNN_bias(tensor):
# print("Tensor[%s] shape=%s" % (tensor.name, str(tensor.get_shape())))
# bias = tensor.eval()
# values = ""
# for i in range(bias.shape[0]):
# values += "%2.3f " % bias[i]
# print(values + "\n")
def get_test_filenames(data_folder, dataset, scale):
    """Build the list of test image paths for one dataset.

    ``test_datasets[dataset]`` is (folder, first_index, stop_index); files
    are named "img_NNN.png" with 1-based, zero-padded indices.  ``scale``
    is accepted for signature compatibility but unused here.
    """
    folder = data_folder + "/" + test_datasets[dataset][0] + "/"
    first, stop = test_datasets[dataset][1], test_datasets[dataset][2]
    return [folder + "img_%03d.png" % (index + 1) for index in range(first, stop)]
def build_test_filenames(data_folder, dataset, scale):
    """Build test image paths for one dataset, or for every dataset on "all"."""
    selected = test_datasets if dataset == "all" else [dataset]
    filenames = []
    for name in selected:
        filenames += get_test_filenames(data_folder, name, scale)
    return filenames
def get_psnr(mse, max_value=255.0):  # peak signal-to-noise ratio
    """Convert an MSE value to PSNR in dB; invalid MSE (None, 0, inf) gives 0."""
    if mse is None or mse == 0 or mse == float('Inf'):
        return 0
    return 20 * math.log(max_value / math.sqrt(mse), 10)
def print_num_of_total_parameters():
    """Print each trainable variable's shape/parameter count and the total."""
    total_parameters = 0
    parameters_string = ""
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value  # TF1 Dimension -> plain int
        total_parameters += variable_parameters
        parameters_string += ("%s-%d, " % (str(shape), variable_parameters))
    print(parameters_string)
    print("Total %d variables, %s params" % (len(tf.trainable_variables()), "{:,}".format(total_parameters)))
# utility for extracting target files from datasets
def main():
    """Copy the configured test-set images from org_data_folder into data/."""
    # TF1 flags are defined here (not at module level) and parsed lazily on
    # first FLAGS attribute access.
    flags = tf.app.flags
    FLAGS = flags.FLAGS
    flags.DEFINE_string("org_data_folder", "org_data", "Folder for original datasets")
    flags.DEFINE_string("test_set", "all", "Test dataset. set5, set14, bsd100, urban100 or all are available")
    flags.DEFINE_integer("scale", 2, "Scale for Super Resolution (can be 2 or 4)")
    test_filenames = build_test_filenames(FLAGS.org_data_folder, FLAGS.test_set, FLAGS.scale)
    for filename in test_filenames:
        # Mirror each source path under data/, creating directories as needed.
        target_filename = "data/" + filename
        print("[%s] > [%s]" % (filename, target_filename))
        if not os.path.exists(os.path.dirname(target_filename)):
            os.makedirs(os.path.dirname(target_filename))
        shutil.copy(filename, target_filename)
    print("OK.")
if __name__ == '__main__':
    main()
| 35.879279 | 146 | 0.627228 |
75af01be32a9ef9dcc10fa2d2fff616384496674 | 2,947 | py | Python | database/tests/open_alchemy/package_database/test_models/spec/test_count_customer_models.py | open-alchemy/OpenAlchemyPackage | 8bf0ed62ed7f6c5015f1bf1c4658dc353395fe9b | [
"Apache-2.0"
] | null | null | null | database/tests/open_alchemy/package_database/test_models/spec/test_count_customer_models.py | open-alchemy/OpenAlchemyPackage | 8bf0ed62ed7f6c5015f1bf1c4658dc353395fe9b | [
"Apache-2.0"
] | 79 | 2020-11-28T04:02:25.000Z | 2021-01-06T08:52:30.000Z | database/tests/open_alchemy/package_database/test_models/spec/test_count_customer_models.py | open-alchemy/Package | 8bf0ed62ed7f6c5015f1bf1c4658dc353395fe9b | [
"Apache-2.0"
] | null | null | null | """Tests for the models."""
import pytest
from open_alchemy.package_database import factory, models
# Parametrised cases for Spec.count_customer_models.  Each param is
# (items seeded into the table, sub to query, expected summed model count).
# A row only counts when its sub matches AND its updated_at_id carries the
# UPDATED_AT_LATEST marker.
COUNT_CUSTOMER_MODELS_TESTS = [
    pytest.param([], "sub 2", 0, id="empty"),
    pytest.param(
        [factory.SpecFactory(sub="sub 1")],
        "sub 1",
        0,
        id="single item sub miss",
    ),
    pytest.param(
        [factory.SpecFactory(sub="sub 1", updated_at_id="11#spec 1")],
        "sub 1",
        0,
        id="single item sub hit updated_at miss",
    ),
    pytest.param(
        [
            factory.SpecFactory(
                sub="sub 1",
                updated_at_id=f"{models.Spec.UPDATED_AT_LATEST}#spec 1",
                model_count=12,
            )
        ],
        "sub 1",
        12,
        id="single item sub hit updated_at hit",
    ),
    pytest.param(
        [
            factory.SpecFactory(
                sub="sub 2",
                updated_at_id=f"{models.Spec.UPDATED_AT_LATEST}#spec 2",
                model_count=22,
            )
        ],
        "sub 2",
        22,
        id="single item sub hit updated_at different hit",
    ),
    pytest.param(
        [
            factory.SpecFactory(sub="sub 2"),
            factory.SpecFactory(sub="sub 2"),
        ],
        "sub 1",
        0,
        id="multiple item all miss",
    ),
    pytest.param(
        [
            factory.SpecFactory(
                sub="sub 1",
                updated_at_id=f"{models.Spec.UPDATED_AT_LATEST}#spec 1",
                model_count=12,
            ),
            factory.SpecFactory(sub="sub 2"),
        ],
        "sub 1",
        12,
        id="multiple item first hit",
    ),
    pytest.param(
        [
            factory.SpecFactory(sub="sub 1"),
            factory.SpecFactory(
                sub="sub 2",
                updated_at_id=f"{models.Spec.UPDATED_AT_LATEST}#spec 2",
                model_count=22,
            ),
        ],
        "sub 2",
        22,
        id="multiple item second hit",
    ),
    # Counts are summed across all matching rows for the same sub.
    pytest.param(
        [
            factory.SpecFactory(
                sub="sub 1",
                updated_at_id=f"{models.Spec.UPDATED_AT_LATEST}#spec 1",
                model_count=12,
            ),
            factory.SpecFactory(
                sub="sub 1",
                updated_at_id=f"{models.Spec.UPDATED_AT_LATEST}#spec 2",
                model_count=22,
            ),
        ],
        "sub 1",
        34,
        id="multiple item all hit",
    ),
]
@pytest.mark.parametrize("items, sub, expected_count", COUNT_CUSTOMER_MODELS_TESTS)
@pytest.mark.models
def test_count_customer_models(items, sub, expected_count):
    """
    GIVEN items in the database and sub
    WHEN count_customer_models on Spec is called with the sub
    THEN the expected count is returned.
    """
    # Persist the seed rows before exercising the query under test.
    for item in items:
        item.save()
    returned_count = models.Spec.count_customer_models(sub=sub)
    assert returned_count == expected_count
| 26.079646 | 83 | 0.507974 |
d7cc0dc43c7c48d2e9f6eaa88bdb17c92339c13b | 3,429 | py | Python | ctm_api_client/models/deployment_file_error.py | tadinve/ctm_python_client | de44e5012214ec42bb99b7f9b4ebc5394cd14328 | [
"BSD-3-Clause"
] | null | null | null | ctm_api_client/models/deployment_file_error.py | tadinve/ctm_python_client | de44e5012214ec42bb99b7f9b4ebc5394cd14328 | [
"BSD-3-Clause"
] | null | null | null | ctm_api_client/models/deployment_file_error.py | tadinve/ctm_python_client | de44e5012214ec42bb99b7f9b4ebc5394cd14328 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_api_client.configuration import Configuration
class DeploymentFileError(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> declared type, and attribute name -> JSON key,
    # driving the generic (de)serialisation logic in to_dict().
    swagger_types = {"lines": "list[str]"}
    attribute_map = {"lines": "lines"}
    def __init__(self, lines=None, _configuration=None):  # noqa: E501
        """DeploymentFileError - a model defined in Swagger"""  # noqa: E501
        # Fall back to a default client Configuration when none is supplied.
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self._lines = None
        self.discriminator = None
        # Assign through the property setter so any validation applies.
        if lines is not None:
            self.lines = lines
    @property
    def lines(self):
        """Gets the lines of this DeploymentFileError.  # noqa: E501
        :return: The lines of this DeploymentFileError.  # noqa: E501
        :rtype: list[str]
        """
        return self._lines
    @lines.setter
    def lines(self, lines):
        """Sets the lines of this DeploymentFileError.
        :param lines: The lines of this DeploymentFileError.  # noqa: E501
        :type: list[str]
        """
        self._lines = lines
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            # Recursively serialise: lists and dicts of models are converted
            # element-wise via their own to_dict(); plain values pass through.
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        # Support generated models that subclass dict.
        if issubclass(DeploymentFileError, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeploymentFileError):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, DeploymentFileError):
            return True
        return self.to_dict() != other.to_dict()
| 28.106557 | 85 | 0.562846 |
432b1d6c378169fde5ac0b89a5b6cc1f008ca10a | 411 | py | Python | Django_Intershala/Django_Intershala/wsgi.py | samir321-pixel/Django_Intershala | 77aaa24a34873dab4c3302727d5f43986a99809e | [
"MIT"
] | 7 | 2021-03-08T17:09:39.000Z | 2021-12-30T09:44:44.000Z | Django_Intershala/Django_Intershala/wsgi.py | samir321-pixel/Django_Intershala | 77aaa24a34873dab4c3302727d5f43986a99809e | [
"MIT"
] | null | null | null | Django_Intershala/Django_Intershala/wsgi.py | samir321-pixel/Django_Intershala | 77aaa24a34873dab4c3302727d5f43986a99809e | [
"MIT"
] | 2 | 2021-03-03T11:35:05.000Z | 2021-03-22T17:00:16.000Z | """
WSGI config for Django_Intershala project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Django_Intershala.settings')  # default settings module for this WSGI process
application = get_wsgi_application()  # module-level WSGI callable consumed by WSGI servers
| 24.176471 | 78 | 0.79562 |
d9bcb1d199d162a48b629e6cbc185166350923ba | 1,466 | py | Python | leetcode-CP/Daily-Questions/1721. Swapping Nodes in a Linked List.py | vijay2020pc/100-days-of-code | b59e54471015b294bad408289e6d9101d7494b01 | [
"MIT"
] | null | null | null | leetcode-CP/Daily-Questions/1721. Swapping Nodes in a Linked List.py | vijay2020pc/100-days-of-code | b59e54471015b294bad408289e6d9101d7494b01 | [
"MIT"
] | null | null | null | leetcode-CP/Daily-Questions/1721. Swapping Nodes in a Linked List.py | vijay2020pc/100-days-of-code | b59e54471015b294bad408289e6d9101d7494b01 | [
"MIT"
] | null | null | null | You are given the head of a linked list, and an integer k.
Return the head of the linked list after swapping the values of the kth node from the beginning and the kth node from the end (the list is 1-indexed).
Example 1:
Input: head = [1,2,3,4,5], k = 2
Output: [1,4,3,2,5]
Example 2:
Input: head = [7,9,6,6,7,8,3,0,9,5], k = 5
Output: [7,9,6,6,8,7,3,0,9,5]
Constraints:
The number of nodes in the list is n.
1 <= k <= n <= 10^5
0 <= Node.val <= 100
Solution:
class Solution:
    """LeetCode 1721: swap the values of the k-th nodes from each end of a list."""

    def swapNodes(self, head: "Optional[ListNode]", k: int) -> "Optional[ListNode]":
        """Swap the values of the k-th node from the front and from the back.

        Single traversal with two pointers; only node *values* are swapped,
        the links are untouched.  Fix: the annotations are quoted so the
        module can be imported outside the LeetCode harness, where
        Optional/ListNode are not defined (unquoted they raise NameError at
        class-definition time).
        """
        # k_begin is the k-th node from the beginning.
        k_begin = head
        for _ in range(k - 1):
            k_begin = k_begin.next
        # Create a gap of exactly k between ptr and k_end; when ptr runs off
        # the end of the list, k_end points at the k-th node from the end.
        k_end, ptr = head, head
        for _ in range(k):
            ptr = ptr.next
        while ptr:
            ptr = ptr.next
            k_end = k_end.next
        # Swap the values.
        k_begin.val, k_end.val = k_end.val, k_begin.val
        return head
| 27.660377 | 150 | 0.582538 |
7edd3624677d9687456b49f5cf4c02782588d517 | 2,867 | py | Python | detective/users/migrations/0001_initial.py | achoy/email-detective | 4d10bb4bbefd10b8a90e15ae04d11fbf7187c3a7 | [
"MIT"
] | null | null | null | detective/users/migrations/0001_initial.py | achoy/email-detective | 4d10bb4bbefd10b8a90e15ae04d11fbf7187c3a7 | [
"MIT"
] | 5 | 2020-06-05T22:47:51.000Z | 2022-02-10T08:10:49.000Z | detective/users/migrations/0001_initial.py | achoy/email-detective | 4d10bb4bbefd10b8a90e15ae04d11fbf7187c3a7 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-09-03 02:53
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the CustomUser model whose
    # fields mirror django.contrib.auth's AbstractUser.
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 63.711111 | 329 | 0.663062 |
761822cde31b96502f5753e4e50e9cb9f9a8f5b1 | 1,643 | py | Python | run.py | eriksonwilliam/rota-viagem | 57316470e4daa58d21c391e3587b03810bf3ebef | [
"MIT"
] | null | null | null | run.py | eriksonwilliam/rota-viagem | 57316470e4daa58d21c391e3587b03810bf3ebef | [
"MIT"
] | null | null | null | run.py | eriksonwilliam/rota-viagem | 57316470e4daa58d21c391e3587b03810bf3ebef | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, request
from Controllers.Main import *
from Models.SearchRoute import *
app = Flask(__name__)  # Flask application served by this module
api = None  # Main controller instance; initialised in the __main__ block before app.run()
@app.route("/api/create", methods=["POST"])
def create():
    """Create a new travel route (origin, destiny, amount) in the data file.

    Returns 406 with a message when the payload is missing or any field is
    invalid; 200 with a confirmation on success.
    """
    # Robustness fix: get_json() returns None for a missing/invalid JSON
    # body, and the original indexed required keys directly -- a missing key
    # produced an unhandled KeyError (HTTP 500).  Use .get() so every bad
    # payload yields a clean 406 instead.
    data = request.get_json() or {}
    origin = data.get('origin')
    destiny = data.get('destiny')
    amount = data.get('amount')
    if isBlank(origin):
        return jsonify({"message": "Origin cannot be null"}), 406
    elif isBlank(destiny):
        return jsonify({"message": "Destiny cannot be null"}), 406
    elif not isinstance(amount, (int, float)) or amount <= 0:
        return jsonify({"message": "Amount cannot be less than or equal to 0"}), 406
    api.dataFile.writeFile(origin, destiny, amount)
    return jsonify({"message": "New route successfully created"})
@app.route("/api/search", methods=["POST"])
def search():
    """Find the cheapest route between origin and destiny.

    Returns 406 for an invalid payload, 400 when no route exists, otherwise
    the best route and its total amount.
    """
    # Robustness fix: tolerate a missing/non-JSON body and absent keys
    # (previously a TypeError/KeyError -> HTTP 500).
    data = request.get_json() or {}
    origin = data.get('origin')
    destiny = data.get('destiny')
    if isBlank(origin):
        return jsonify({"message": "Origin cannot be null or empty"}), 406
    elif isBlank(destiny):
        return jsonify({"message": "Destiny cannot be null or empty"}), 406
    # Renamed the local from `search` -- it shadowed this view function.
    search_engine = Search()
    api.dataFile.readFile()
    better_route = search_engine.better_price_travel(route=origin + "-" + destiny, dataRoutes=api.dataFile.dataInput)
    if better_route is not None:
        return jsonify({"route": better_route[0], "amount": better_route[1]})
    return jsonify({"message": "Route not found"}), 400
def isBlank(data):
    """Return True when ``data`` is None, empty, or whitespace-only."""
    return not (data and data.strip())
if __name__ == '__main__':
    # Collect command-line arguments; args[0] is the script name and
    # args[1] must be the routes input file path.
    # NOTE(review): `sys` is not imported explicitly above -- it appears to
    # come from one of the star imports; confirm.
    args = []
    for param in sys.argv:
        args.append(param)
    fileInput = args[1]
    api = Main(fileInput)
    api.openningFile()
    app.run(debug=True)
c5d55a37e36094f6f4f4cf81865507645d496efa | 2,382 | py | Python | src/fuzzingtool/core/plugins/encoders/url.py | NESCAU-UFLA/FuzzyingTool | ee0a3c149fb9839fb269cc0f254fb3234058e6af | [
"MIT"
] | null | null | null | src/fuzzingtool/core/plugins/encoders/url.py | NESCAU-UFLA/FuzzyingTool | ee0a3c149fb9839fb269cc0f254fb3234058e6af | [
"MIT"
] | null | null | null | src/fuzzingtool/core/plugins/encoders/url.py | NESCAU-UFLA/FuzzyingTool | ee0a3c149fb9839fb269cc0f254fb3234058e6af | [
"MIT"
] | null | null | null | # Copyright (c) 2020 - present Vitor Oriel <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from urllib.parse import quote, unquote
from ...bases.base_plugin import Plugin
from ...bases.base_encoder import BaseEncoder
from ....decorators.plugin_meta import plugin_meta
from ....exceptions import BadArgumentFormat
@plugin_meta
class Url(BaseEncoder, Plugin):
__author__ = ("Vitor Oriel",)
__params__ = {
'metavar': "ENCODE_LEVEL",
'type': str,
}
__desc__ = "Replace special characters in string using the %xx escape. Letters, digits, and the characters '_.-~' are never quoted."
__type__ = None
__version__ = "0.2"
def __init__(self, encode_level: int):
if not encode_level:
encode_level = 1
else:
try:
encode_level = int(encode_level)
except ValueError:
raise BadArgumentFormat("the encoding level must be an integer")
self.encode_level = encode_level
def encode(self, payload: str) -> str:
encoded = payload
for _ in range(self.encode_level):
encoded = quote(encoded)
return encoded
def decode(self, payload: str) -> str:
decoded = payload
for _ in range(self.encode_level):
decoded = unquote(decoded)
return decoded
| 39.04918 | 136 | 0.701511 |
1dda247f94af5eb819c00b80ace86d9fddc29b95 | 29,768 | py | Python | app/src/iam_cleanup.py | strongdm/aws-auto-cleanup | 1a47a2f31e72a9a01e3260b9164d318976b14bf1 | [
"MIT"
] | null | null | null | app/src/iam_cleanup.py | strongdm/aws-auto-cleanup | 1a47a2f31e72a9a01e3260b9164d318976b14bf1 | [
"MIT"
] | null | null | null | app/src/iam_cleanup.py | strongdm/aws-auto-cleanup | 1a47a2f31e72a9a01e3260b9164d318976b14bf1 | [
"MIT"
] | 1 | 2021-12-09T17:11:01.000Z | 2021-12-09T17:11:01.000Z | import datetime
import sys
import time
import boto3
from src.helper import Helper
class IAMCleanup:
    def __init__(self, logging, whitelist, settings, execution_log):
        """Store collaborators and cache commonly used settings.

        Args:
            logging: logger used for progress and error output.
            whitelist: mapping of service -> resource type -> whitelisted ids.
            settings: nested settings dict (clean flags, ttl, general.dry_run).
            execution_log: shared structure recording actions taken.
        """
        self.logging = logging
        self.whitelist = whitelist
        self.settings = settings
        self.execution_log = execution_log
        self.region = "global"  # IAM is a global (non-regional) service
        self._client_iam = None  # lazily created in the client_iam property
        # dry_run defaults to True so nothing is deleted unless explicitly enabled
        self._dry_run = self.settings.get("general", {}).get("dry_run", True)
    @property
    def client_iam(self):
        """Lazily create and cache the boto3 IAM client."""
        if not self._client_iam:
            self._client_iam = boto3.client("iam")
        return self._client_iam
    def run(self):
        """Entry point: clean up IAM Policies, then IAM Roles."""
        self.policies()
        self.roles()
def policies(self):
"""
Deletes IAM Policies.
"""
self.logging.debug("Started cleanup of IAM Policies.")
clean = (
self.settings.get("services", {})
.get("iam", {})
.get("policy", {})
.get("clean", False)
)
if clean:
try:
paginator = self.client_iam.get_paginator("list_policies")
response_iterator = paginator.paginate(
Scope="Local"
).build_full_result()
except:
self.logging.error("Could not list all IAM Policies.")
self.logging.error(sys.exc_info()[1])
return False
ttl_days = (
self.settings.get("services", {})
.get("iam", {})
.get("policy", {})
.get("ttl", 7)
)
for resource in response_iterator.get("Policies"):
resource_id = resource.get("PolicyName")
resource_arn = resource.get("Arn")
resource_date = resource.get("UpdateDate")
resource_action = None
if resource_id not in self.whitelist.get("iam", {}).get("policy", []):
delta = Helper.get_day_delta(resource_date)
if delta.days > ttl_days:
if resource.get("AttachmentCount") > 0:
# - Detach the policy from all users, groups, and roles that the policy is attached to,
# using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy API operations.
# To list all the users, groups, and roles that a policy is attached to, use ListEntitiesForPolicy.
entities_paginator = self.client_iam.get_paginator(
"list_entities_for_policy"
)
try:
user_response_iterator = entities_paginator.paginate(
PolicyArn=resource_arn, EntityFilter="User"
).build_full_result()
except:
self.logging.error(
f"Could not list all IAM Users with IAM Policy '{resource_id}' attached."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
for user_resource in user_response_iterator.get(
"PolicyUsers"
):
try:
if not self._dry_run:
self.client_iam.detach_user_policy(
UserName=user_resource.get("UserName"),
PolicyArn=resource_arn,
)
except:
self.logging.error(
f"""Could not detatch IAM Policy '{resource_id}' from IAM User {user_resource.get("UserName")}."""
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.debug(
f"""IAM Policy '{resource_id}' was detatched from IAM User {user_resource.get("UserName")}."""
)
try:
role_response_iterator = entities_paginator.paginate(
PolicyArn=resource_arn, EntityFilter="Role"
).build_full_result()
except:
self.logging.error(
f"Could not list all IAM Roles with IAM Policy '{resource_id}' attached."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
for role_resource in role_response_iterator.get(
"PolicyRoles"
):
try:
if not self._dry_run:
self.client_iam.detach_role_policy(
RoleName=role_resource.get("RoleName"),
PolicyArn=resource_arn,
)
except:
self.logging.error(
f"""Could not detatch IAM Policy '{resource_id}' from IAM Role {role_resource.get("RoleName")}."""
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.debug(
f"""IAM Policy '{resource_id}' was detatched from IAM Role {role_resource.get("RoleName")}."""
)
try:
group_response_iterator = entities_paginator.paginate(
PolicyArn=resource_arn, EntityFilter="Group"
).build_full_result()
except:
self.logging.error(
f"Could not list all IAM Policies with IAM Group '{resource_id}' attached."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
for group_resource in group_response_iterator.get(
"PolicyGroups"
):
try:
if not self._dry_run:
self.client_iam.detach_group_policy(
GroupName=group_resource.get(
"GroupName"
),
PolicyArn=resource_arn,
)
except:
self.logging.error(
f"""Could not detatch IAM Policy '{resource_id}' from IAM Group {group_resource.get("GroupName")}."""
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.debug(
f"""IAM Policy '{resource_id}' was detatched from IAM Group {group_resource.get("GroupName")}."""
)
# - Delete all versions of the policy using DeletePolicyVersion. To list the policy's versions, use ListPolicyVersions.
# You cannot use DeletePolicyVersion to delete the version that is marked as the default version.
# You delete the policy's default version in the next step of the process.
try:
versions_paginator = self.client_iam.get_paginator(
"list_policy_versions"
)
versions_response_iterator = versions_paginator.paginate(
PolicyArn=resource_arn
).build_full_result()
except:
self.logging.error(
f"Could not list all IAM Policy's '{resource_id}' versions."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
for versions_resource in versions_response_iterator.get(
"Versions"
):
if not versions_resource.get("IsDefaultVersion"):
try:
if not self._dry_run:
self.client_iam.delete_policy_version(
PolicyArn=resource_arn,
VersionId=versions_resource.get(
"VersionId"
),
)
except:
self.logging.error(
f"""Could not delete IAM Policy Version '{versions_resource.get("VersionId")}' for IAM Policy {resource_id}."""
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.debug(
f"""IAM Policy Version '{versions_resource.get("VersionId")}' was deleted for IAM Policy {resource_id}."""
)
# - Delete the policy (this automatically deletes the policy's default version) using this API.
try:
if not self._dry_run:
self.client_iam.delete_policy(PolicyArn=resource_arn)
except:
self.logging.error(
f"Could not delete IAM Policy '{resource_id}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.info(
f"IAM Policy '{resource_id}' was last modified {delta.days} days ago "
"and has been deleted."
)
resource_action = "DELETE"
else:
self.logging.debug(
f"IAM Policy '{resource_id}' was last modified {delta.days} days ago "
"(less than TTL setting) and has not been deleted."
)
resource_action = "SKIP - TTL"
else:
self.logging.debug(
f"IAM Policy '{resource_id}' has been whitelisted and has not been deleted."
)
resource_action = "SKIP - WHITELIST"
Helper.record_execution_log_action(
self.execution_log,
self.region,
"IAM",
"Policy",
resource_id,
resource_action,
)
self.logging.debug("Finished cleanup of IAM Policies.")
return True
else:
self.logging.info("Skipping cleanup of IAM Policies.")
return True
def roles(self):
"""
Deletes IAM Roles.
"""
self.logging.debug("Started cleanup of IAM Roles.")
clean = (
self.settings.get("services", {})
.get("iam", {})
.get("role", {})
.get("clean", False)
)
if clean:
try:
paginator = self.client_iam.get_paginator("list_roles")
response_iterator = paginator.paginate().build_full_result()
except:
self.logging.error("Could not list all IAM Roles.")
self.logging.error(sys.exc_info()[1])
return False
ttl_days = (
self.settings.get("services", {})
.get("iam", {})
.get("role", {})
.get("ttl", 7)
)
for resource in response_iterator.get("Roles"):
resource_id = resource.get("RoleName")
resource_arn = resource.get("Arn")
resource_date = resource.get("CreateDate")
resource_action = None
describe_role = self.client_iam.get_role(RoleName=resource_id)
resource_tags = describe_role.get("Role").get("Tags")
if resource_tags:
Helper.parse_tags(resource_tags, "iam:role:" + resource_id, self.region)
self.whitelist = Helper.get_whitelist()
if "AWSServiceRoleFor" not in resource_id:
if resource_id not in self.whitelist.get("iam", {}).get("role", []):
delta = Helper.get_day_delta(resource_date)
if delta.days > ttl_days:
# check when the role was last accessed
try:
gen_last_accessed = self.client_iam.generate_service_last_accessed_details(
Arn=resource_arn
)
except:
self.logging.error(
f"Could not generate IAM Role last accessed details for '{resource_arn}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
try:
get_last_accessed = self.client_iam.get_service_last_accessed_details(
JobId=gen_last_accessed.get("JobId")
)
except:
self.logging.error(
f"Could not get IAM Role last accessed details for '{resource_arn}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
backoff = 1
while (
get_last_accessed.get("JobStatus")
== "IN_PROGRESS"
):
if backoff <= 16:
time.sleep(backoff)
try:
get_last_accessed = self.client_iam.get_service_last_accessed_details(
JobId=gen_last_accessed.get("JobId")
)
except:
self.logging.error(
f"Could not get IAM Role last accessed details for '{resource_arn}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
backoff = 99
else:
backoff = 2 * backoff
else:
self.logging.error(
f"Could not retrieve IAM Role '{resource_id}' last accessed "
"details in a reasonable amount of time."
)
resource_action = "ERROR"
if get_last_accessed.get("JobStatus") == "COMPLETED":
last_accessed = (
datetime.datetime.now()
- datetime.timedelta(days=365)
)
for service in get_last_accessed.get(
"ServicesLastAccessed"
):
service_date = service.get(
"LastAuthenticated", "1900-01-01 00:00:00"
)
if Helper.convert_to_datetime(
service_date
) > Helper.convert_to_datetime(last_accessed):
last_accessed = service_date
delta = Helper.get_day_delta(last_accessed)
if delta.days > ttl_days:
# delete all inline policies
try:
policies = self.client_iam.list_role_policies(
RoleName=resource_id
)
except:
self.logging.error(
f"Could not retrieve inline IAM Policies for IAM Role '{resource_id}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
continue
for policy in policies.get("PolicyNames"):
try:
if not self._dry_run:
self.client_iam.delete_role_policy(
RoleName=resource_id,
PolicyName=policy,
)
except:
self.logging.error(
f"Could not delete an inline IAM Policy '{policy}' from IAM Role '{resource_id}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.debug(
f"IAM Policy '{policy}' has been deleted from IAM Role '{resource_id}'."
)
# detach all managed policies
try:
policies = (
self.client_iam.list_attached_role_policies(
RoleName=resource_id
)
)
except:
self.logging.error(
f"Could not retrieve managed IAM Policies attached to IAM Role '{resource_id}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
for policy in policies.get("AttachedPolicies"):
try:
if not self._dry_run:
self.client_iam.detach_role_policy(
RoleName=resource_id,
PolicyArn=policy.get(
"PolicyArn"
),
)
except:
self.logging.error(
f"Could not detach a managed IAM Policy '{policy.get('PolicyName')}' from IAM Role '{resource_id}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.debug(
f"IAM Policy '{policy.get('PolicyName')}' has been detached from IAM Role '{resource_id}'."
)
# delete all instance profiles
try:
profiles = self.client_iam.list_instance_profiles_for_role(
RoleName=resource_id
)
except:
self.logging.error(
f"Could not retrieve IAM Instance Profiles associated with IAM Role '{resource_id}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
for profile in profiles.get("InstanceProfiles"):
# remove role from instance profile
try:
if not self._dry_run:
self.client_iam.remove_role_from_instance_profile(
InstanceProfileName=profile.get(
"InstanceProfileName"
),
RoleName=resource_id,
)
except:
self.logging.error(
f"Could not remove IAM Role '{resource_id}' from IAM Instance Profile '{profile.get('InstanceProfileName')}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.debug(
f"IAM Role '{resource_id}' has been removed from IAM Instance Profile '{profile.get('InstanceProfileName')}'."
)
# delete instance profile
try:
if not self._dry_run:
self.client_iam.delete_instance_profile(
InstanceProfileName=profile.get(
"InstanceProfileName"
)
)
except:
self.logging.error(
f"Could not delete IAM Instance Profile '{profile.get('InstanceProfileName')}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.debug(
f"IAM Instance Profile '{profile.get('InstanceProfileName')}' has been deleted."
)
# delete role
try:
if not self._dry_run:
self.client_iam.delete_role(
RoleName=resource_id
)
except:
self.logging.error(
f"Could not delete IAM Role '{resource_id}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.info(
f"IAM Role '{resource_id}' was created {delta.days} days ago "
"and has been deleted."
)
resource_action = "DELETE"
else:
self.logging.debug(
f"IAM Role '{resource_id}' was last accessed {delta.days} days ago "
"(less than TTL setting) and has not been deleted."
)
resource_action = "SKIP - TTL"
else:
self.logging.error(
f"Could not get IAM Role last accessed details for '{resource_id}'."
)
resource_action = "ERROR"
else:
self.logging.debug(
f"IAM Role '{resource_id}' was created {delta.days} days ago "
"(less than TTL setting) and has not been deleted."
)
resource_action = "SKIP - TTL"
else:
self.logging.debug(
f"IAM Role '{resource_id}' has been whitelisted and has not been deleted."
)
resource_action = "SKIP - WHITELIST"
Helper.record_execution_log_action(
self.execution_log,
self.region,
"IAM",
"Role",
resource_id,
resource_action,
)
self.logging.debug("Finished cleanup of IAM Roles.")
return True
else:
self.logging.info("Skipping cleanup of IAM Roles.")
return True
| 54.222222 | 162 | 0.34574 |
04dd05049ff80a56a2888e2633d6bb0ff36cb156 | 258 | py | Python | roundednumberexample.py | seanmacb/COMP-115-Exercises | fbe7e5b158f2db785b886b6c600f1a8beb19ab1f | [
"MIT"
] | null | null | null | roundednumberexample.py | seanmacb/COMP-115-Exercises | fbe7e5b158f2db785b886b6c600f1a8beb19ab1f | [
"MIT"
] | null | null | null | roundednumberexample.py | seanmacb/COMP-115-Exercises | fbe7e5b158f2db785b886b6c600f1a8beb19ab1f | [
"MIT"
] | null | null | null | #Gives the square root of a number rounded to 2 dec places
import math
def main():
num=eval(input("Enter your number here: "))
sqroot=math.sqrt(num)
sqroot= int((sqroot + 0.005) * 100) / 100
print("The square root of",num,"is",sqroot)
main() | 28.666667 | 58 | 0.662791 |
cafdf17df1c0ff752c3a594c52f6ea0c9b346ff7 | 411 | py | Python | startproject/crudgeodjangoproj/wsgi.py | krishnaglodha/CRUD-using-geodjango | 2c9e4c3184499ddc3e04b961dae77560b2a87c52 | [
"MIT"
] | null | null | null | startproject/crudgeodjangoproj/wsgi.py | krishnaglodha/CRUD-using-geodjango | 2c9e4c3184499ddc3e04b961dae77560b2a87c52 | [
"MIT"
] | null | null | null | startproject/crudgeodjangoproj/wsgi.py | krishnaglodha/CRUD-using-geodjango | 2c9e4c3184499ddc3e04b961dae77560b2a87c52 | [
"MIT"
] | 1 | 2021-08-30T15:46:23.000Z | 2021-08-30T15:46:23.000Z | """
WSGI config for crudgeodjangoproj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings unless the environment already
# specifies a settings module.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crudgeodjangoproj.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| 24.176471 | 78 | 0.79562 |
a73cedab4981eef3699ea50f51a0e41174b107c6 | 32,318 | py | Python | vmware_nsx/services/vpnaas/nsxv3/ipsec_driver.py | yebinama/vmware-nsx | 5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/services/vpnaas/nsxv3/ipsec_driver.py | yebinama/vmware-nsx | 5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/services/vpnaas/nsxv3/ipsec_driver.py | yebinama/vmware-nsx | 5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context as n_context
from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.db import db
from vmware_nsx.services.vpnaas.common_v3 import ipsec_driver as common_driver
from vmware_nsx.services.vpnaas.common_v3 import ipsec_utils
from vmware_nsx.services.vpnaas.nsxv3 import ipsec_validator
from vmware_nsxlib.v3 import exceptions as nsx_lib_exc
from vmware_nsxlib.v3 import nsx_constants as consts
from vmware_nsxlib.v3 import vpn_ipsec
LOG = logging.getLogger(__name__)
IPSEC = 'ipsec'
class NSXv3IPsecVpnDriver(common_driver.NSXcommonIPsecVpnDriver):
    def __init__(self, service_plugin):
        """Initialize the NSX-v3 IPsec VPN driver.

        Wires the NSX-v3 validator into the common driver base, caches the
        nsxlib VPN-IPsec client, and registers a callback so NSX local
        endpoints are cleaned up when a router gateway is removed.
        """
        validator = ipsec_validator.IPsecV3Validator(service_plugin)
        super(NSXv3IPsecVpnDriver, self).__init__(service_plugin, validator)
        self._nsxlib = self._core_plugin.nsxlib
        self._nsx_vpn = self._nsxlib.vpn_ipsec
        registry.subscribe(
            self._delete_local_endpoint, resources.ROUTER_GATEWAY,
            events.AFTER_DELETE)
def _translate_cidr(self, cidr):
return self._nsxlib.firewall_section.get_ip_cidr_reference(
cidr,
consts.IPV6 if netaddr.valid_ipv6(cidr) else consts.IPV4)
def _translate_addresses_to_target(self, cidrs):
return [self._translate_cidr(ip) for ip in cidrs]
    def _generate_ipsecvpn_firewall_rules(self, plugin_type, context,
                                          router_id=None):
        """Return the firewall rules needed to allow vpn traffic.

        Builds one allow rule per ACTIVE ipsec connection of every ACTIVE
        VPN service on the given router (local subnet -> peer CIDRs).
        `plugin_type` is part of the callback signature and is unused here.
        """
        fw_rules = []
        # get all the active services of this router
        filters = {'router_id': [router_id],
                   'status': [constants.ACTIVE]}
        services = self.vpn_plugin.get_vpnservices(
            context.elevated(), filters=filters)
        if not services:
            return fw_rules
        for srv in services:
            # each service protects a single subnet (the local side)
            subnet = self.l3_plugin.get_subnet(
                context.elevated(), srv['subnet_id'])
            local_cidrs = [subnet['cidr']]
            # get all the active connections of this service
            filters = {'vpnservice_id': [srv['id']],
                       'status': [constants.ACTIVE]}
            connections = self.vpn_plugin.get_ipsec_site_connections(
                context.elevated(), filters=filters)
            for conn in connections:
                peer_cidrs = conn['peer_cidrs']
                fw_rules.append({
                    'display_name': 'VPN connection ' + conn['id'],
                    'action': consts.FW_ACTION_ALLOW,
                    'destinations': self._translate_addresses_to_target(
                        peer_cidrs),
                    'sources': self._translate_addresses_to_target(
                        local_cidrs)})
        return fw_rules
def _update_firewall_rules(self, context, vpnservice):
LOG.debug("Updating vpn firewall rules for router %s",
vpnservice['router_id'])
self._core_plugin.update_router_firewall(
context, vpnservice['router_id'])
    def _update_router_advertisement(self, context, vpnservice):
        """Refresh the NSX route-advertisement rules for the service router.

        Advertises the local subnet of every ACTIVE VPN service that has at
        least one ACTIVE connection. No-SNAT routers and routers without a
        gateway are skipped (their subnets are already advertised).
        """
        LOG.debug("Updating router advertisement rules for router %s",
                  vpnservice['router_id'])
        router_id = vpnservice['router_id']
        # skip no-snat router as it is already advertised,
        # and router with no gw
        rtr = self.l3_plugin.get_router(context, router_id)
        if (not rtr.get('external_gateway_info') or
                not rtr['external_gateway_info'].get('enable_snat', True)):
            return
        rules = []
        # get all the active services of this router
        filters = {'router_id': [router_id], 'status': [constants.ACTIVE]}
        services = self.vpn_plugin.get_vpnservices(
            context.elevated(), filters=filters)
        rule_name_pref = 'VPN advertisement service'
        for srv in services:
            # use only services with active connections
            filters = {'vpnservice_id': [srv['id']],
                       'status': [constants.ACTIVE]}
            connections = self.vpn_plugin.get_ipsec_site_connections(
                context.elevated(), filters=filters)
            if not connections:
                continue
            subnet = self.l3_plugin.get_subnet(
                context.elevated(), srv['subnet_id'])
            rules.append({
                'display_name': "%s %s" % (rule_name_pref, srv['id']),
                'action': consts.FW_ACTION_ALLOW,
                'networks': [subnet['cidr']]})
        if rules:
            logical_router_id = db.get_nsx_router_id(context.session,
                                                     router_id)
            # name_prefix lets the backend replace only VPN-owned rules
            self._nsxlib.logical_router.update_advertisement_rules(
                logical_router_id, rules, name_prefix=rule_name_pref)
def _nsx_tags(self, context, connection):
return self._nsxlib.build_v3_tags_payload(
connection, resource_type='os-vpn-connection-id',
project_name=context.tenant_name)
    def _nsx_tags_for_reused(self):
        """Return minimal NSX tags for resources shared across tenants."""
        # Service & Local endpoint can be reused cross tenants,
        # so we do not add the tenant/object id.
        return self._nsxlib.build_v3_api_version_tag()
def _create_ike_profile(self, context, connection):
"""Create an ike profile for a connection"""
# Note(asarfaty) the NSX profile can be reused, so we can consider
# creating it only once in the future, and keeping a use-count for it.
# There is no driver callback for profiles creation so it has to be
# done on connection creation.
ike_policy_id = connection['ikepolicy_id']
ikepolicy = self.vpn_plugin.get_ikepolicy(context, ike_policy_id)
try:
profile = self._nsx_vpn.ike_profile.create(
ikepolicy['name'] or ikepolicy['id'],
description=ikepolicy['description'],
encryption_algorithm=ipsec_utils.ENCRYPTION_ALGORITHM_MAP[
ikepolicy['encryption_algorithm']],
digest_algorithm=ipsec_utils.AUTH_ALGORITHM_MAP[
ikepolicy['auth_algorithm']],
ike_version=ipsec_utils.IKE_VERSION_MAP[
ikepolicy['ike_version']],
dh_group=ipsec_utils.PFS_MAP[ikepolicy['pfs']],
sa_life_time=ikepolicy['lifetime']['value'],
tags=self._nsx_tags(context, connection))
except nsx_lib_exc.ManagerError as e:
msg = _("Failed to create an ike profile: %s") % e
raise nsx_exc.NsxPluginException(err_msg=msg)
return profile['id']
    def _delete_ike_profile(self, ikeprofile_id):
        """Delete an NSX IKE profile by its NSX id."""
        self._nsx_vpn.ike_profile.delete(ikeprofile_id)
def _create_ipsec_profile(self, context, connection):
"""Create an ipsec profile for a connection"""
# Note(asarfaty) the NSX profile can be reused, so we can consider
# creating it only once in the future, and keeping a use-count for it.
# There is no driver callback for profiles creation so it has to be
# done on connection creation.
ipsec_policy_id = connection['ipsecpolicy_id']
ipsecpolicy = self.vpn_plugin.get_ipsecpolicy(
context, ipsec_policy_id)
try:
profile = self._nsx_vpn.tunnel_profile.create(
ipsecpolicy['name'] or ipsecpolicy['id'],
description=ipsecpolicy['description'],
encryption_algorithm=ipsec_utils.ENCRYPTION_ALGORITHM_MAP[
ipsecpolicy['encryption_algorithm']],
digest_algorithm=ipsec_utils.AUTH_ALGORITHM_MAP[
ipsecpolicy['auth_algorithm']],
dh_group=ipsec_utils.PFS_MAP[ipsecpolicy['pfs']],
pfs=True,
sa_life_time=ipsecpolicy['lifetime']['value'],
tags=self._nsx_tags(context, connection))
except nsx_lib_exc.ManagerError as e:
msg = _("Failed to create a tunnel profile: %s") % e
raise nsx_exc.NsxPluginException(err_msg=msg)
return profile['id']
    def _delete_ipsec_profile(self, ipsecprofile_id):
        """Delete an NSX tunnel (ipsec) profile by its NSX id."""
        self._nsx_vpn.tunnel_profile.delete(ipsecprofile_id)
def _create_dpd_profile(self, context, connection):
dpd_info = connection['dpd']
try:
profile = self._nsx_vpn.dpd_profile.create(
self._get_dpd_profile_name(connection),
description='neutron dpd profile',
timeout=dpd_info.get('timeout'),
enabled=True if dpd_info.get('action') == 'hold' else False,
tags=self._nsx_tags(context, connection))
except nsx_lib_exc.ManagerError as e:
msg = _("Failed to create a DPD profile: %s") % e
raise nsx_exc.NsxPluginException(err_msg=msg)
return profile['id']
    def _delete_dpd_profile(self, dpdprofile_id):
        """Delete an NSX DPD profile by its NSX id."""
        self._nsx_vpn.dpd_profile.delete(dpdprofile_id)
def _update_dpd_profile(self, connection, dpdprofile_id):
dpd_info = connection['dpd']
self._nsx_vpn.dpd_profile.update(dpdprofile_id,
name=self._get_dpd_profile_name(connection),
timeout=dpd_info.get('timeout'),
enabled=True if dpd_info.get('action') == 'hold' else False)
def _create_peer_endpoint(self, context, connection, ikeprofile_id,
ipsecprofile_id, dpdprofile_id):
default_auth = vpn_ipsec.AuthenticationModeTypes.AUTH_MODE_PSK
try:
peer_endpoint = self._nsx_vpn.peer_endpoint.create(
connection['name'] or connection['id'],
connection['peer_address'],
connection['peer_id'],
description=connection['description'],
authentication_mode=default_auth,
dpd_profile_id=dpdprofile_id,
ike_profile_id=ikeprofile_id,
ipsec_tunnel_profile_id=ipsecprofile_id,
connection_initiation_mode=ipsec_utils.INITIATION_MODE_MAP[
connection['initiator']],
psk=connection['psk'],
tags=self._nsx_tags(context, connection))
except nsx_lib_exc.ManagerError as e:
msg = _("Failed to create a peer endpoint: %s") % e
raise nsx_exc.NsxPluginException(err_msg=msg)
return peer_endpoint['id']
def _update_peer_endpoint(self, peer_ep_id, connection):
self._nsx_vpn.peer_endpoint.update(
peer_ep_id,
name=connection['name'] or connection['id'],
peer_address=connection['peer_address'],
peer_id=connection['peer_id'],
description=connection['description'],
connection_initiation_mode=ipsec_utils.INITIATION_MODE_MAP[
connection['initiator']],
psk=connection['psk'])
    def _delete_peer_endpoint(self, peer_ep_id):
        """Delete an NSX peer endpoint by its NSX id."""
        self._nsx_vpn.peer_endpoint.delete(peer_ep_id)
def _get_profiles_from_peer_endpoint(self, peer_ep_id):
peer_ep = self._nsx_vpn.peer_endpoint.get(peer_ep_id)
return (
peer_ep['ike_profile_id'],
peer_ep['ipsec_tunnel_profile_id'],
peer_ep['dpd_profile_id'])
def _create_local_endpoint(self, context, local_addr, nsx_service_id,
router_id, project_id):
"""Creating an NSX local endpoint for a logical router
This endpoint can be reused by other connections, and will be deleted
when the router is deleted or gateway is removed
"""
# Add the neutron router-id to the tags to help search later
tags = self._nsxlib.build_v3_tags_payload(
{'id': router_id, 'project_id': project_id},
resource_type='os-neutron-router-id',
project_name=context.tenant_name)
try:
local_endpoint = self._nsx_vpn.local_endpoint.create(
'Local endpoint for OS VPNaaS',
local_addr,
nsx_service_id,
tags=tags)
except nsx_lib_exc.ManagerError as e:
msg = _("Failed to create a local endpoint: %s") % e
raise nsx_exc.NsxPluginException(err_msg=msg)
return local_endpoint['id']
def _search_local_endpint(self, router_id):
tags = [{'scope': 'os-neutron-router-id', 'tag': router_id}]
ep_list = self._nsxlib.search_by_tags(
tags=tags,
resource_type=self._nsx_vpn.local_endpoint.resource_type)
if ep_list['results']:
return ep_list['results'][0]['id']
    def _get_local_endpoint(self, context, vpnservice):
        """Get the id of the local endpoint for a service

        The NSX allows only one local endpoint per local address
        This method will create it if there is not matching endpoint
        """
        # use the router GW as the local ip
        router_id = vpnservice['router']['id']
        # check if we already have this endpoint on the NSX
        local_ep_id = self._search_local_endpint(router_id)
        if local_ep_id:
            return local_ep_id
        # create a new one, bound to the VPN service's external IPv4 address
        local_addr = vpnservice['external_v4_ip']
        nsx_service_id = self._get_nsx_vpn_service(context, vpnservice)
        local_ep_id = self._create_local_endpoint(
            context, local_addr, nsx_service_id, router_id,
            vpnservice['project_id'])
        return local_ep_id
    def _delete_local_endpoint_by_router(self, context, router_id):
        """Remove the NSX local endpoint and its neutron port for a router."""
        # delete the local endpoint from the NSX
        local_ep_id = self._search_local_endpint(router_id)
        if local_ep_id:
            self._nsx_vpn.local_endpoint.delete(local_ep_id)
        # delete the neutron port with this IP
        port = self._find_vpn_service_port(context, router_id)
        if port:
            # force_delete_vpn bypasses the VPN-port deletion guard
            self.l3_plugin.delete_port(context, port['id'],
                                       force_delete_vpn=True)
    def _delete_local_endpoint(self, resource, event, trigger, **kwargs):
        """Upon router deletion / gw removal delete the matching endpoint"""
        # Callback signature mandated by the neutron registry; only
        # router_id from kwargs is used.
        router_id = kwargs.get('router_id')
        ctx = n_context.get_admin_context()
        self._delete_local_endpoint_by_router(ctx, router_id)
    def validate_router_gw_info(self, context, router_id, gw_info):
        """Upon router gw update - verify no-snat

        Routers with VPN services must not enable SNAT; no-SNAT routers
        must not have subnets overlapping any existing VPN connection.
        Raises RouterWithSNAT / RouterWithOverlapNoSnat accordingly.
        """
        # check if this router has a vpn service
        admin_con = context.elevated()
        # get all relevant services, except those waiting to be deleted or in
        # ERROR state
        filters = {'router_id': [router_id],
                   'status': [constants.ACTIVE, constants.PENDING_CREATE,
                              constants.INACTIVE, constants.PENDING_UPDATE]}
        services = self.vpn_plugin.get_vpnservices(admin_con, filters=filters)
        if services:
            # do not allow enable-snat
            if (gw_info and
                gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default)):
                raise common_driver.RouterWithSNAT(router_id=router_id)
        else:
            # if this is a non-vpn router. if snat was disabled, should check
            # there is no overlapping with vpn connections
            if (gw_info and
                not gw_info.get('enable_snat',
                                cfg.CONF.enable_snat_by_default)):
                # get router subnets
                subnets = self._core_plugin._find_router_subnets_cidrs(
                    context, router_id)
                # find all vpn services with connections
                if not self._check_subnets_overlap_with_all_conns(
                    admin_con, subnets):
                    raise common_driver.RouterWithOverlapNoSnat(
                        router_id=router_id)
    def _get_session_rules(self, context, connection, vpnservice):
        """Build the NSX policy rule list (local subnet <-> peer CIDRs)."""
        # TODO(asarfaty): support vpn-endpoint-groups too
        peer_cidrs = connection['peer_cidrs']
        local_cidrs = [vpnservice['subnet']['cidr']]
        rule = self._nsx_vpn.session.get_rule_obj(local_cidrs, peer_cidrs)
        return [rule]
def _create_session(self, context, connection, local_ep_id,
peer_ep_id, rules, enabled=True):
try:
session = self._nsx_vpn.session.create(
connection['name'] or connection['id'],
local_ep_id, peer_ep_id, rules,
description=connection['description'],
tags=self._nsx_tags(context, connection),
enabled=enabled)
except nsx_lib_exc.ManagerError as e:
msg = _("Failed to create a session: %s") % e
raise nsx_exc.NsxPluginException(err_msg=msg)
return session['id']
def _update_session(self, session_id, connection, rules=None,
enabled=True):
self._nsx_vpn.session.update(
session_id,
name=connection['name'] or connection['id'],
description=connection['description'],
policy_rules=rules,
enabled=enabled)
def get_ipsec_site_connection_status(self, context, ipsec_site_conn_id):
mapping = db.get_nsx_vpn_connection_mapping(
context.session, ipsec_site_conn_id)
if not mapping or not mapping['session_id']:
LOG.info("Couldn't find NSX session for VPN connection %s",
ipsec_site_conn_id)
return
status_result = self._nsx_vpn.session.get_status(mapping['session_id'])
if status_result and 'session_status' in status_result:
status = status_result['session_status']
# NSX statuses are UP, DOWN, DEGRADE
# VPNaaS connection status should be ACTIVE or DOWN
if status == 'UP':
return 'ACTIVE'
elif status == 'DOWN' or status == 'DEGRADED':
return 'DOWN'
    def _delete_session(self, session_id):
        """Delete an NSX VPN session by its NSX id."""
        self._nsx_vpn.session.delete(session_id)
    def create_ipsec_site_connection(self, context, ipsec_site_conn):
        """Create all NSX objects backing a new ipsec site connection.

        Creates (in order) the ike/ipsec/dpd profiles, the peer endpoint,
        a (possibly reused) local endpoint, and finally the NSX session,
        persisting the NSX ids in the DB. On an NsxPluginException any
        objects already created are rolled back (except reused ones) and
        the connection status is set to ERROR before re-raising.
        """
        LOG.debug('Creating ipsec site connection %(conn_info)s.',
                  {"conn_info": ipsec_site_conn})
        # Note(asarfaty) the plugin already calls the validator
        # which also validated the policies and service
        ikeprofile_id = None
        ipsecprofile_id = None
        dpdprofile_id = None
        peer_ep_id = None
        session_id = None
        vpnservice_id = ipsec_site_conn['vpnservice_id']
        vpnservice = self.service_plugin._get_vpnservice(
            context, vpnservice_id)
        ipsec_id = ipsec_site_conn["id"]
        try:
            # create the ike profile
            ikeprofile_id = self._create_ike_profile(
                context, ipsec_site_conn)
            LOG.debug("Created NSX ike profile %s", ikeprofile_id)
            # create the ipsec profile
            ipsecprofile_id = self._create_ipsec_profile(
                context, ipsec_site_conn)
            LOG.debug("Created NSX ipsec profile %s", ipsecprofile_id)
            # create the dpd profile
            dpdprofile_id = self._create_dpd_profile(
                context, ipsec_site_conn)
            LOG.debug("Created NSX dpd profile %s", dpdprofile_id)
            # create the peer endpoint and add to the DB
            peer_ep_id = self._create_peer_endpoint(
                context, ipsec_site_conn,
                ikeprofile_id, ipsecprofile_id, dpdprofile_id)
            LOG.debug("Created NSX peer endpoint %s", peer_ep_id)
            # create or reuse a local endpoint using the vpn service
            local_ep_id = self._get_local_endpoint(context, vpnservice)
            # Finally: create the session with policy rules
            rules = self._get_session_rules(
                context, ipsec_site_conn, vpnservice)
            # session is created disabled if either service or connection
            # is administratively down
            connection_enabled = (vpnservice['admin_state_up'] and
                                  ipsec_site_conn['admin_state_up'])
            session_id = self._create_session(
                context, ipsec_site_conn, local_ep_id, peer_ep_id, rules,
                enabled=connection_enabled)
            # update the DB with the session id
            db.add_nsx_vpn_connection_mapping(
                context.session, ipsec_site_conn['id'], session_id,
                dpdprofile_id, ikeprofile_id, ipsecprofile_id, peer_ep_id)
            self._update_status(context, vpnservice_id, ipsec_id,
                                constants.ACTIVE)
        except nsx_exc.NsxPluginException:
            with excutils.save_and_reraise_exception():
                self._update_status(context, vpnservice_id, ipsec_id,
                                    constants.ERROR)
                # delete the NSX objects that were already created
                # Do not delete reused objects: service, local endpoint
                if session_id:
                    self._delete_session(session_id)
                if peer_ep_id:
                    self._delete_peer_endpoint(peer_ep_id)
                if dpdprofile_id:
                    self._delete_dpd_profile(dpdprofile_id)
                if ipsecprofile_id:
                    self._delete_ipsec_profile(ipsecprofile_id)
                if ikeprofile_id:
                    self._delete_ike_profile(ikeprofile_id)
        # update router firewall rules
        self._update_firewall_rules(context, vpnservice)
        # update router advertisement rules
        self._update_router_advertisement(context, vpnservice)
def delete_ipsec_site_connection(self, context, ipsec_site_conn):
    """Delete the NSX backend objects of an ipsec site connection.

    Removes the session, peer endpoint and DPD/IKE/ipsec profiles that were
    created for this connection, plus the DB mapping entry. The local
    endpoint and the VPN service are shared between connections and are
    deliberately left in place. A missing mapping is logged but does not
    fail the deletion.
    """
    LOG.debug('Deleting ipsec site connection %(site)s.',
              {"site": ipsec_site_conn})
    vpnservice_id = ipsec_site_conn['vpnservice_id']
    vpnservice = self.service_plugin._get_vpnservice(
        context, vpnservice_id)
    # get all data from the nsx based on the connection id in the DB
    mapping = db.get_nsx_vpn_connection_mapping(
        context.session, ipsec_site_conn['id'])
    if not mapping:
        LOG.warning("Couldn't find nsx ids for VPN connection %s",
                    ipsec_site_conn['id'])
        # Do not fail the deletion
        return
    # delete each backend object only if it was actually created
    if mapping['session_id']:
        self._delete_session(mapping['session_id'])
    if mapping['peer_ep_id']:
        self._delete_peer_endpoint(mapping['peer_ep_id'])
    if mapping['dpd_profile_id']:
        self._delete_dpd_profile(mapping['dpd_profile_id'])
    if mapping['ipsec_profile_id']:
        self._delete_ipsec_profile(mapping['ipsec_profile_id'])
    if mapping['ike_profile_id']:
        self._delete_ike_profile(mapping['ike_profile_id'])
    # Do not delete the local endpoint and service as they are reused
    db.delete_nsx_vpn_connection_mapping(context.session,
                                         ipsec_site_conn['id'])
    # update router firewall rules
    self._update_firewall_rules(context, vpnservice)
    # update router advertisement rules
    self._update_router_advertisement(context, vpnservice)
def update_ipsec_site_connection(self, context, old_ipsec_conn,
                                 ipsec_site_conn):
    """Apply updates of an ipsec site connection to the NSX backend.

    Updates the DPD profile (only when dpd config or name changed), the
    peer endpoint, and the session rules/enabled state. Local endpoints
    and the VPN service are reused and never updated here.

    Raises:
        NsxIPsecVpnMappingNotFound: when no NSX mapping exists for this
            connection; its status is set to ERROR before raising.
    """
    LOG.debug('Updating ipsec site connection new %(site)s.',
              {"site": ipsec_site_conn})
    LOG.debug('Updating ipsec site connection old %(site)s.',
              {"site": old_ipsec_conn})
    # Note(asarfaty) the plugin already calls the validator
    # which also validated the policies and service
    ipsec_id = old_ipsec_conn['id']
    vpnservice_id = old_ipsec_conn['vpnservice_id']
    vpnservice = self.service_plugin._get_vpnservice(
        context, vpnservice_id)
    mapping = db.get_nsx_vpn_connection_mapping(
        context.session, ipsec_site_conn['id'])
    if not mapping:
        LOG.error("Couldn't find nsx ids for VPN connection %s",
                  ipsec_site_conn['id'])
        self._update_status(context, vpnservice_id, ipsec_id, "ERROR")
        raise nsx_exc.NsxIPsecVpnMappingNotFound(conn=ipsec_id)
    # check if the dpd configuration changed
    old_dpd = old_ipsec_conn['dpd']
    new_dpd = ipsec_site_conn['dpd']
    if (old_dpd['action'] != new_dpd['action'] or
        old_dpd['timeout'] != new_dpd['timeout'] or
        old_ipsec_conn['name'] != ipsec_site_conn['name']):
        self._update_dpd_profile(ipsec_site_conn,
                                 mapping['dpd_profile_id'])
    # update peer endpoint with all the parameters that could be modified
    # Note(asarfaty): local endpoints are reusable and will not be updated
    self._update_peer_endpoint(mapping['peer_ep_id'], ipsec_site_conn)
    rules = self._get_session_rules(
        context, ipsec_site_conn, vpnservice)
    # a session is enabled only when both service and connection are up
    connection_enabled = (vpnservice['admin_state_up'] and
                          ipsec_site_conn['admin_state_up'])
    self._update_session(mapping['session_id'], ipsec_site_conn, rules,
                         enabled=connection_enabled)
    if ipsec_site_conn['peer_cidrs'] != old_ipsec_conn['peer_cidrs']:
        # Update firewall
        self._update_firewall_rules(context, vpnservice)
    # No service updates. No need to update router advertisement rules
def _create_vpn_service(self, tier0_uuid):
    """Create a NSX VPN service on the given TIER-0 router.

    Returns:
        The id of the newly created NSX service.

    Raises:
        NsxPluginException: if the backend create call fails.
    """
    try:
        service = self._nsx_vpn.service.create(
            'Neutron VPN service for T0 router ' + tier0_uuid,
            tier0_uuid,
            enabled=True,
            ike_log_level=ipsec_utils.DEFAULT_LOG_LEVEL,
            tags=self._nsx_tags_for_reused())
    except nsx_lib_exc.ManagerError as e:
        msg = _("Failed to create vpn service: %s") % e
        raise nsx_exc.NsxPluginException(err_msg=msg)
    return service['id']
def _find_vpn_service(self, tier0_uuid, validate=True):
    """Return the NSX id of the VPN service on this tier0 router.

    Returns None (implicitly, by falling through the loop) when no
    service exists for the router. With validate=True a service that
    exists but is disabled raises NsxPluginException instead.
    """
    # find the service for the tier0 router in the NSX.
    # Note(asarfaty) we expect only a small number of services
    services = self._nsx_vpn.service.list()['results']
    for srv in services:
        if srv['logical_router_id'] == tier0_uuid:
            # if it exists but disabled: issue an error
            if validate and not srv.get('enabled', True):
                msg = _("NSX vpn service %s must be enabled") % srv['id']
                raise nsx_exc.NsxPluginException(err_msg=msg)
            return srv['id']
def _get_service_tier0_uuid(self, context, vpnservice):
    """Return the uuid of the TIER-0 router behind this service's router."""
    neutron_router_id = vpnservice['router_id']
    router = self._core_plugin._get_router(context, neutron_router_id)
    return self._core_plugin._get_tier0_uuid_by_router(context, router)
def _create_vpn_service_if_needed(self, context, vpnservice):
    """Ensure a NSX VPN service exists on the service's TIER-0 router.

    The NSX allows a single service per tier0 router, so an already
    existing service is reused; otherwise a new one is created.
    """
    tier0_uuid = self._get_service_tier0_uuid(context, vpnservice)
    existing = self._find_vpn_service(tier0_uuid)
    if not existing:
        # no reusable service found on this tier0 - create a new one
        self._create_vpn_service(tier0_uuid)
def _delete_vpn_service_if_needed(self, context, vpnservice):
    """Delete the NSX VPN service unless another neutron vpn service on
    the same tier0 router still uses it.

    Backend deletion failures are logged, not raised.
    """
    # Delete the VPN service on the NSX if no other service connected
    # to the same tier0 use it
    elev_context = context.elevated()
    tier0_uuid = self._get_service_tier0_uuid(elev_context, vpnservice)
    all_services = self.vpn_plugin.get_vpnservices(elev_context)
    for srv in all_services:
        if (srv['id'] != vpnservice['id'] and
            self._get_service_tier0_uuid(elev_context, srv) == tier0_uuid):
            LOG.info("Not deleting vpn service from the NSX as other "
                     "neutron vpn services still use it.")
            return
    # Find the NSX-ID
    srv_id = self._get_nsx_vpn_service(elev_context, vpnservice)
    if not srv_id:
        LOG.error("Not deleting vpn service from the NSX as the "
                  "service was not found on the NSX.")
        return
    try:
        self._nsx_vpn.service.delete(srv_id)
    except Exception as e:
        # best effort: leave an orphaned service rather than fail deletion
        LOG.error("Failed to delete VPN service %s: %s",
                  srv_id, e)
def _delete_local_endpoints_if_needed(self, context, vpnservice):
    """Remove the router's local endpoint once its last vpn service
    is being deleted.
    """
    elev_context = context.elevated()
    router_id = vpnservice['router_id']
    remaining = self.vpn_plugin.get_vpnservices(
        elev_context, filters={'router_id': [router_id]})
    if not remaining:
        # this was the last service on the router - drop its endpoint
        self._delete_local_endpoint_by_router(elev_context, router_id)
def _get_nsx_vpn_service(self, context, vpnservice):
    """Return the NSX id of the VPN service backing this neutron service."""
    tier0 = self._get_service_tier0_uuid(context, vpnservice)
    # no validation here: a disabled service is still a valid lookup result
    return self._find_vpn_service(tier0, validate=False)
def create_vpnservice(self, context, vpnservice):
    """Create a VPN service: validate, allocate a local (tunnel) address
    and ensure a NSX service exists on the tier0 router.

    On validation or address-allocation failure the neutron service is
    deleted (rollback) and the original exception is re-raised.
    """
    #TODO(asarfaty) support vpn-endpoint-group-create for local & peer
    # cidrs too
    LOG.debug('Creating VPN service %(vpn)s', {'vpn': vpnservice})
    vpnservice_id = vpnservice['id']
    vpnservice = self.service_plugin._get_vpnservice(context,
                                                     vpnservice_id)
    try:
        self.validator.validate_vpnservice(context, vpnservice)
        local_address = self._get_service_local_address(
            context.elevated(), vpnservice)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Rolling back change on the neutron
            self.service_plugin.delete_vpnservice(context, vpnservice_id)
    vpnservice['external_v4_ip'] = local_address
    self.service_plugin.set_external_tunnel_ips(context,
                                                vpnservice_id,
                                                v4_ip=local_address)
    self._create_vpn_service_if_needed(context, vpnservice)
def update_vpnservice(self, context, old_vpnservice, vpnservice):
    """Propagate vpn-service updates to the NSX.

    Only admin_state_up changes are handled: the NSX session of every
    mapped connection of this service is enabled/disabled accordingly.
    """
    # Only handle the case of admin-state-up changes
    if old_vpnservice['admin_state_up'] != vpnservice['admin_state_up']:
        # update all relevant connections
        filters = {'vpnservice_id': [vpnservice['id']]}
        connections = self.vpn_plugin.get_ipsec_site_connections(
            context, filters=filters)
        for conn in connections:
            mapping = db.get_nsx_vpn_connection_mapping(
                context.session, conn['id'])
            if mapping:
                # enabled only if both service and connection are up
                connection_enabled = (vpnservice['admin_state_up'] and
                                      conn['admin_state_up'])
                self._update_session(mapping['session_id'], conn,
                                     enabled=connection_enabled)
def delete_vpnservice(self, context, vpnservice):
    """Release NSX objects that no remaining service needs."""
    # drop the router local endpoint when this was its last service
    self._delete_local_endpoints_if_needed(context, vpnservice)
    # drop the tier0 NSX service when no other neutron service uses it
    self._delete_vpn_service_if_needed(context, vpnservice)
| 44.94854 | 79 | 0.625627 |
845e5a526882a627caf2eeabaaac0f78d9bf770d | 5,169 | py | Python | oarepo_model_builder_multilingual/property_preprocessors/i18nStr.py | oarepo/oarepo-model-builder-multilingual | 884da6667dfd6f4bb2c255b4f42d6d4de999d2e8 | [
"MIT"
] | null | null | null | oarepo_model_builder_multilingual/property_preprocessors/i18nStr.py | oarepo/oarepo-model-builder-multilingual | 884da6667dfd6f4bb2c255b4f42d6d4de999d2e8 | [
"MIT"
] | 2 | 2022-02-06T20:03:11.000Z | 2022-03-07T11:01:39.000Z | oarepo_model_builder_multilingual/property_preprocessors/i18nStr.py | oarepo/oarepo-model-builder-multilingual | 884da6667dfd6f4bb2c255b4f42d6d4de999d2e8 | [
"MIT"
] | null | null | null | from oarepo_model_builder.builders.jsonschema import JSONSchemaBuilder
from oarepo_model_builder.builders.mapping import MappingBuilder
from oarepo_model_builder.invenio.invenio_record_schema import InvenioRecordSchemaBuilder
from oarepo_model_builder.property_preprocessors import PropertyPreprocessor, process
from oarepo_model_builder.stack import ReplaceElement, ModelBuilderStack
from oarepo_model_builder.utils.camelcase import camel_case
from oarepo_model_builder.utils.deepmerge import deepmerge
def alternative_gen(supported_langs, key):
    """Build per-language mapping alternatives for a multilingual field.

    For every language code in ``supported_langs`` a ``<key>_<lang>``
    mapping entry of type ``fulltext+keyword`` is produced, enriched under
    ``oarepo:mapping`` with the language's optional ``text``/``sort``/
    ``keyword`` settings.

    NOTE(review): assumes each supported_langs value is a dict that may
    contain 'text', 'sort' and 'keyword' sub-dicts -- confirm against the
    model configuration schema.
    """
    data = {}
    for lan in supported_langs:
        alt = {key + '_' + lan: {
            'type': 'fulltext+keyword',
        }}
        multilang_options = {}
        if 'text' in supported_langs[lan]:
            deepmerge(multilang_options, supported_langs[lan]['text'])
        if 'sort' in supported_langs[lan]:
            # sort sub-field is never indexed and carries the language code
            sort = deepmerge(supported_langs[lan]['sort'], {'index': False, 'language': lan})
            deepmerge(multilang_options, {'sort': sort})
        if 'keyword' in supported_langs[lan]:
            deepmerge(multilang_options, {'fields': {'keyword': supported_langs[lan]['keyword']}})
        deepmerge(
            alt[key + '_' + lan].setdefault("oarepo:mapping", {}),
            multilang_options,
            [],
        )
        data = deepmerge(data, alt)
    return data
class I18nStrPreprocessor(PropertyPreprocessor):
    """Expands the custom ``i18nStr`` data type for the JSON schema,
    the search mapping and the marshmallow schema builders."""

    @process(model_builder=JSONSchemaBuilder,
             path='**/properties/*',
             condition=lambda current, stack: current.type == 'i18nStr')
    def modify_multilang_schema(self, data, stack: ModelBuilderStack, **kwargs):
        """Rewrite an i18nStr property into an object with required
        lang/value string fields (names configurable via
        ``oarepo:multilingual``)."""
        data['type'] = 'object'
        definition = data.get('oarepo:multilingual', {})
        properties = data.get('properties', {})
        lang = definition.get('lang-field', 'lang')
        value = definition.get('value-field', 'value')
        # NOTE(review): duplicate of the assignment above, kept as-is
        properties = data.get('properties', {})
        data['properties'] = {
            lang: {
                'type': 'string',
                'required': True
            },
            value: {
                'type': 'string',
                'required': True
            }, **properties
        }
        return data

    @process(model_builder=MappingBuilder,
             path='**/properties/*',
             condition=lambda current, stack: current.type == 'i18nStr')
    def modify_multilang_mapping(self, data, stack: ModelBuilderStack, **kwargs):
        """Replace an i18nStr property in the mapping with an object
        (keyword lang + fulltext value) plus per-language alternatives."""
        alternative = alternative_gen(self.settings['supported-langs'], stack.top.key)
        definition = data.get('oarepo:multilingual', {})
        lang = definition.get('lang-field', 'lang')
        value = definition.get('value-field', 'value')
        properties = data.get('properties', {})
        data = {
            stack.top.key: {
                'type': 'object',
                'properties': {
                    lang: {
                        'type': 'keyword'
                    },
                    value: {
                        'type': 'fulltext'
                    }, **properties
                }
            }
        }
        deepmerge(data, alternative)
        # ReplaceElement swaps the whole mapping node for the generated one
        raise ReplaceElement(data)

    @process(model_builder=InvenioRecordSchemaBuilder,
             path='**/properties/*',
             condition=lambda current, stack: current.type == 'i18nStr')
    def modify_multilang_marshmallow(self, data, stack: ModelBuilderStack, **kwargs):
        """Attach marshmallow metadata for an i18nStr property.

        The shared i18n schema class is reused for the default field names;
        otherwise a dedicated schema class (with a language-code validator)
        is generated.
        """
        definition = data.get('oarepo:multilingual', {})
        use_i18n = False
        if 'usei18n' in definition:
            use_i18n = True
        lang = definition.get('lang-field', 'lang')
        value = definition.get('value-field', 'value')
        properties = data.get('properties', {})
        if lang == 'lang' and value == 'value' and not use_i18n:
            # default field names: reuse the shared i18n schema class
            data['type'] = 'object'
            deepmerge(data.setdefault('oarepo:marshmallow', {}), {
                'class': self.settings.python.i18n_schema_class,
                'nested': True
            })
        else:
            # custom field names: generate a dedicated schema class
            data['type'] = 'object'
            data['properties'] = {
                lang: {
                    'type': 'string',
                    'required': True
                },
                value: {
                    'type': 'string',
                    'required': True
                }, **properties
            }
            if 'oarepo:marshmallow' in data and 'class' in data['oarepo:multilingual']:
                class_name = data['oarepo:marshmallow']['class']
            else:
                class_name = camel_case(stack.top.key) + 'Schema'
            deepmerge(data.setdefault('oarepo:marshmallow', {}), {
                'generate': True,
                'class': class_name,
                'nested': True,
                'validates': {lang: { 'imports' : ['import langcodes'],'definition' :'''def validate_lang(self, value):
        if value != "_" and not langcodes.Language.get(value).is_valid():
            raise ma_ValidationError("Invalid language code")'''}}
            })
        return data
| 36.921429 | 119 | 0.54614 |
aa8b27be3925fabd901a50971cf9697e20593d51 | 1,124 | py | Python | leetcode/55-jump-game.py | ardakkk/Algorithms-and-Data-Structures | c428bb0bd7eeb6c34448630f88f13e1329b54636 | [
"MIT"
] | null | null | null | leetcode/55-jump-game.py | ardakkk/Algorithms-and-Data-Structures | c428bb0bd7eeb6c34448630f88f13e1329b54636 | [
"MIT"
] | null | null | null | leetcode/55-jump-game.py | ardakkk/Algorithms-and-Data-Structures | c428bb0bd7eeb6c34448630f88f13e1329b54636 | [
"MIT"
] | null | null | null | # Given an array of non-negative integers, you are initially positioned at the first index of the array.
#
# Each element in the array represents your maximum jump length at that position.
#
# Determine if you are able to reach the last index.
#
# Example 1:
#
# Input: [2,3,1,1,4]
# Output: true
# Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
# Time: O(n^2) | Space: O(n) DP array same size as Input Array
# Dynamic programming solution
class Solution:
    def canJump(self, nums):
        """Return True if the last index is reachable from index 0.

        Bottom-up DP: reachable[t] is True when some earlier reachable
        index can jump to t. O(n^2) time, O(n) space.
        """
        n = len(nums)
        reachable = [False] * n
        reachable[0] = True
        for target in range(1, n):
            reachable[target] = any(
                reachable[src] and src + nums[src] >= target
                for src in range(target)
            )
        return reachable[-1]
# Time: O(n) | Space: O(1)
# Greedy solution: track the furthest reachable index in one pass.
class Solution2:
    def canJump(self, nums):
        """Return True if the last index is reachable from index 0.

        Greedy single pass: track the furthest index reachable so far.
        If the current index is beyond it, we are stuck. Improvement over
        the original: exit early as soon as the last index is provably
        reachable instead of always scanning the whole array.
        O(n) time, O(1) space.
        """
        max_reach = 0
        last = len(nums) - 1
        for current_step, jump in enumerate(nums):
            if current_step > max_reach:
                # a gap we can never cross
                return False
            max_reach = max(max_reach, current_step + jump)
            if max_reach >= last:
                # the last index is already reachable; no need to continue
                return True
        return True
072c615ea899aa739d681c2b9847389c1e3fa32b | 69 | py | Python | bot/database/__init__.py | TheShubhendra/quora-discord | db5c9810ca63760b9703eeb704c4b0f69089ca74 | [
"MIT"
] | 4 | 2021-07-28T05:15:06.000Z | 2021-10-06T05:28:54.000Z | bot/database/__init__.py | TheShubhendra/quora-discord | db5c9810ca63760b9703eeb704c4b0f69089ca74 | [
"MIT"
] | 1 | 2021-08-05T12:36:00.000Z | 2021-08-05T12:36:00.000Z | bot/database/__init__.py | TheShubhendra/quora-discord | db5c9810ca63760b9703eeb704c4b0f69089ca74 | [
"MIT"
] | 2 | 2021-08-05T09:53:55.000Z | 2022-03-02T13:36:36.000Z | from .dbmanager import DatabaseManager
__all__ = [DatabaseManager]
| 13.8 | 38 | 0.811594 |
52415976f658dd37f09052a9452803484751068e | 831 | py | Python | pwndbg/commands/reload.py | ctfhacker/pwndbg | 22867ed15378c7fc77c43194cc342e2b80489345 | [
"MIT"
] | null | null | null | pwndbg/commands/reload.py | ctfhacker/pwndbg | 22867ed15378c7fc77c43194cc342e2b80489345 | [
"MIT"
] | null | null | null | pwndbg/commands/reload.py | ctfhacker/pwndbg | 22867ed15378c7fc77c43194cc342e2b80489345 | [
"MIT"
] | null | null | null | try:
from __builtins__ import reload as _reload
except:
from imp import reload as _reload
import imp
import os
import sys
import types
import gdb
import pwndbg
import pwndbg.commands
import pwndbg.events
def rreload(module, mdict=None):
    """Recursively reload ``module`` and the submodules named in its
    ``__all__``.

    Args:
        module: the module object to reload.
        mdict: internal list of already-visited submodules, used to avoid
            reloading (and recursing into) the same module twice.

    Fixes over the original: drops the unused local ``name`` and the
    unused exception binding ``e``.
    """
    if mdict is None:
        mdict = []
    for attribute_name in getattr(module, '__all__', []) or []:
        attribute = getattr(module, attribute_name, None)
        # only recurse into real submodules that were not seen yet
        if isinstance(attribute, types.ModuleType) and attribute not in mdict:
            mdict.append(attribute)
            rreload(attribute, mdict)
    try:
        _reload(module)
    except Exception:
        # Best effort: a module that fails to reload is silently skipped so
        # one broken module does not abort the whole recursive reload.
        pass
@pwndbg.commands.Command
def reload(*a):
    """Reload pwndbg in-place: fire the pre-reload hooks, recursively
    reload the pwndbg package, then fire the post-reload hooks."""
    pwndbg.events.on_reload()
    rreload(pwndbg)
    pwndbg.events.after_reload()
| 20.775 | 78 | 0.676294 |
f0c93059650a41d140530476d30de6837e49cb19 | 3,688 | py | Python | lib/taglib/objects.py | kateliev/taglib | e3fc049d9621cac91998f8d979e709fbfdeacfc8 | [
"MIT"
] | null | null | null | lib/taglib/objects.py | kateliev/taglib | e3fc049d9621cac91998f8d979e709fbfdeacfc8 | [
"MIT"
] | null | null | null | lib/taglib/objects.py | kateliev/taglib | e3fc049d9621cac91998f8d979e709fbfdeacfc8 | [
"MIT"
] | null | null | null | # encoding: utf-8
# ----------------------------------------------------
# MODULE: taglib.objects
# ----------------------------------------------------
# (C) Vassil Kateliev, 2021
# (C) http://www.kateliev.com
# (C) https://github.com/kateliev
# ----------------------------------------------------
# NOTE: Module is kept Python 2 and 3 compatible!
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies -------------------------------------
from __future__ import absolute_import, print_function, unicode_literals
# - Init --------------------------------------------
__version__ = 2.6
# - Classes -----------------------------------------
# -- Abstract base classes --------------------------
class markup_config(object):
''' Base markup config object'''
def __init__(self):
self.whitespace = ' '*4
self.tags = []
self.template_start_end = '{fh}<{tag}{attrib}>{fch}{content}{ft}</{tag}>'
self.template_empty = '{fh}<{tag}{attrib}/>'
self.document = ''
class abstract_builder(object):
def __init__(self, markup_config):
'''Base Abstract builder class.
Args:
markup_tags list(string): A list of markup tags that form a language
Returns:
markup_builder (object)
'''
# - Externals
self.stack = []
# - Internals
self.__markup_config = markup_config
self.__indent = lambda level: level * self.__markup_config.whitespace
self.__raw_mark = '__'
self.__raw_tokens = ['__raw__', '__r', '__string__', '__s']
# -- Dynamic build of class methods
for keyword in self.__markup_config.tags:
setattr(self.__class__, keyword, eval("lambda the_class, content='', **kwargs: the_class.element('%s', content, **kwargs)" %keyword))
def element(self, tag, content, **kwargs):
'''Add new markup element to the command stack.
Args:
tag (string) : Valid markup Tag;
content (string): Content. If empty (''), provides nested container functionality or empty tag;
attribs (kwargs): Valid markup attributes as keyword arguments.
Special raw formatting ['__raw__', '__r', '__string__', '__s'] denote strings that are not
Python compatible, like attribute names containing hyphens or column.
Returns:
Content (string) or markup_builder (object)
'''
assert tag in self.__markup_config.tags, 'Unrecognized language element <%s>' %tag
if content == '':
content = self.__class__()
if len(kwargs.keys()):
attrib = ' ' + ' '.join(['{}="{}"'.format(attrib.strip(self.__raw_mark), value) if attrib not in self.__raw_tokens else value for attrib, value in kwargs.items()])
else:
attrib = ''
self.stack.append((tag, content, attrib))
return content
def reset(self):
self.stack = []
def dumps(self, indent_level=0):
'''Build markup by dumping the command stack as string.'''
export_markup = ''
# - Build
for item in self.stack:
tag, content, attrib = item
fh = ft = '\n' + self.__indent(indent_level - 1)
fch = '\n' + self.__indent(indent_level)
if isinstance(content, self.__class__):
content = content.dumps(indent_level + 1)
fch = self.__indent(indent_level)
if len(content):
export_markup += self.__markup_config.template_start_end.format(tag=tag, content=content, attrib=attrib, fh=fh, fch=fch, ft=ft)
else:
export_markup += self.__markup_config.template_empty.format(tag=tag, attrib=attrib, fh=fh, fch=fch, ft=ft)
return export_markup
def dump(self, filename):
'''Build markup document by dumping the command stack to a file.'''
markup_document = self.__markup_config.document + self.dumps(0)
with open(filename, 'w') as markup_file:
markup_file.writelines(markup_document) | 34.792453 | 166 | 0.632321 |
d0e690198a5db9f077aa0cc9d5d62093d04a67a3 | 30,614 | py | Python | thirdparty/google_appengine/google/appengine/ext/db/djangoforms.py | jamslevy/gsoc | e995e1a8d34e0291ab988ba501ae4efc61f9516d | [
"Apache-2.0"
] | 1 | 2016-05-09T14:43:53.000Z | 2016-05-09T14:43:53.000Z | google/appengine/ext/db/djangoforms.py | Arachnid/google_appengine | 2e950619f5027f414131fafc3cc253af4875a0fe | [
"Apache-2.0"
] | null | null | null | google/appengine/ext/db/djangoforms.py | Arachnid/google_appengine | 2e950619f5027f414131fafc3cc253af4875a0fe | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Support for creating Django (new) forms from Datastore data models.
This is our best shot at supporting as much of Django as possible: you
won't be able to use Django's db package, but you can use our
db package instead, and create Django forms from it, either fully
automatically, or with overrides.
Note, you should not import these classes from this module. Importing
this module patches the classes in place, and you should continue to
import them from google.appengine.db.
Some of the code here is strongly inspired by Django's own ModelForm
class (new in Django 0.97). Our code also supports Django 0.96 (so as
to be maximally compatible). Note that our API is always similar to
Django 0.97's API, even when used with Django 0.96 (which uses a
different API, chiefly form_for_model()).
Terminology notes:
- forms: always refers to the Django newforms subpackage
- field: always refers to a Django forms.Field instance
- property: always refers to a db.Property instance
Mapping between properties and fields:
+====================+===================+==============+====================+
| Property subclass | Field subclass | datatype | widget; notes |
+====================+===================+==============+====================+
| StringProperty | CharField | unicode | Textarea |
| | | | if multiline |
+--------------------+-------------------+--------------+--------------------+
| TextProperty | CharField | unicode | Textarea |
+--------------------+-------------------+--------------+--------------------+
| BlobProperty | FileField | str | skipped in v0.96 |
+--------------------+-------------------+--------------+--------------------+
| DateTimeProperty | DateTimeField | datetime | skipped |
| | | | if auto_now[_add] |
+--------------------+-------------------+--------------+--------------------+
| DateProperty | DateField | date | ditto |
+--------------------+-------------------+--------------+--------------------+
| TimeProperty | TimeField | time | ditto |
+--------------------+-------------------+--------------+--------------------+
| IntegerProperty | IntegerField | int or long | |
+--------------------+-------------------+--------------+--------------------+
| FloatProperty | FloatField | float | CharField in v0.96 |
+--------------------+-------------------+--------------+--------------------+
| BooleanProperty | BooleanField | bool | |
+--------------------+-------------------+--------------+--------------------+
| UserProperty | CharField | users.User | |
+--------------------+-------------------+--------------+--------------------+
| StringListProperty | CharField | list of str | Textarea |
+--------------------+-------------------+--------------+--------------------+
| LinkProperty | URLField | str | |
+--------------------+-------------------+--------------+--------------------+
| ReferenceProperty | ModelChoiceField* | db.Model | |
+--------------------+-------------------+--------------+--------------------+
| _ReverseReferenceP.| None | <iterable> | always skipped |
+====================+===================+==============+====================+
Notes:
*: this Field subclasses is defined by us, not in Django.
"""
import itertools
import logging
import django.core.exceptions
import django.utils.datastructures
try:
from django import newforms as forms
except ImportError:
from django import forms
try:
from django.utils.translation import ugettext_lazy as _
except ImportError:
pass
from google.appengine.api import users
from google.appengine.ext import db
def monkey_patch(name, bases, namespace):
  """A 'metaclass' for adding new methods to an existing class.

  In this version, existing methods can't be overridden; this is by
  design, to avoid accidents.

  Usage example:

    class PatchClass(TargetClass):
      __metaclass__ = monkey_patch
      def foo(self, ...): ...
      def bar(self, ...): ...

  This is equivalent to:

    def foo(self, ...): ...
    def bar(self, ...): ...
    TargetClass.foo = foo
    TargetClass.bar = bar
    PatchClass = TargetClass

  Note that PatchClass becomes an alias for TargetClass; by convention
  it is recommended to give PatchClass the same name as TargetClass.
  """
  assert len(bases) == 1, 'Exactly one base class is required'
  base = bases[0]
  # Copy every new attribute onto the existing base class; refuse to
  # overwrite anything already defined there.
  for name, value in namespace.iteritems():
    if name not in ('__metaclass__', '__module__'):
      assert name not in base.__dict__, "Won't override attribute %r" % (name,)
      setattr(base, name, value)
  # Returning the base makes the 'patched' class an alias for it.
  return base
class Property(db.Property):
  # monkey_patch copies these methods onto db.Property itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, form_class=forms.CharField, **kwargs):
    """Return a Django form field appropriate for this property.

    Args:
      form_class: a forms.Field subclass, default forms.CharField

    Additional keyword arguments are passed to the form_class constructor,
    with certain defaults:
      required: self.required
      label: prettified self.verbose_name, if not None
      widget: a forms.Select instance if self.choices is non-empty
      initial: self.default, if not None

    Returns:
      A fully configured instance of form_class, or None if no form
      field should be generated for this property.
    """
    defaults = {'required': self.required}
    if self.verbose_name:
      defaults['label'] = self.verbose_name.capitalize().replace('_', ' ')
    if self.choices:
      choices = []
      # offer an empty choice unless a value is guaranteed to be selected
      if not self.required or (self.default is None and
                               'initial' not in kwargs):
        choices.append(('', '---------'))
      for choice in self.choices:
        choices.append((str(choice), unicode(choice)))
      defaults['widget'] = forms.Select(choices=choices)
    if self.default is not None:
      defaults['initial'] = self.default
    # caller-supplied kwargs override all computed defaults
    defaults.update(kwargs)
    return form_class(**defaults)

  def get_value_for_form(self, instance):
    """Extract the property value from the instance for use in a form.

    Override this to do a property- or field-specific type conversion.

    Args:
      instance: a db.Model instance

    Returns:
      The property's value extracted from the instance, possibly
      converted to a type suitable for a form field; possibly None.

    By default this returns the instance attribute's value unchanged.
    """
    return getattr(instance, self.name)

  def make_value_from_form(self, value):
    """Convert a form value to a property value.

    Override this to do a property- or field-specific type conversion.

    Args:
      value: the cleaned value retrieved from the form field

    Returns:
      A value suitable for assignment to a model instance's property;
      possibly None.

    By default this converts the value to self.data_type if it
    isn't already an instance of that type, except if the value is
    empty, in which case we return None.
    """
    if value in (None, ''):
      return None
    if not isinstance(value, self.data_type):
      value = self.data_type(value)
    return value
class UserProperty(db.Property):
  """This class exists solely to log a warning when it is used."""

  def __init__(self, *args, **kwds):
    # logging.warn() is a deprecated alias of logging.warning(); use the
    # canonical name. The message text itself is unchanged.
    logging.warning("Please don't use modelforms.UserProperty; "
                    "use db.UserProperty instead.")
    super(UserProperty, self).__init__(*args, **kwds)
class StringProperty(db.StringProperty):
  # monkey_patch copies this method onto db.StringProperty itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for a string property.

    This sets the widget default to forms.Textarea if the property's
    multiline attribute is set.
    """
    defaults = {}
    if self.multiline:
      defaults['widget'] = forms.Textarea
    defaults.update(kwargs)
    return super(StringProperty, self).get_form_field(**defaults)
class TextProperty(db.TextProperty):
  # monkey_patch copies this method onto db.TextProperty itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for a text property.

    This sets the widget default to forms.Textarea (text is always
    treated as multiline).
    """
    defaults = {'widget': forms.Textarea}
    defaults.update(kwargs)
    return super(TextProperty, self).get_form_field(**defaults)
class BlobProperty(db.BlobProperty):
  # monkey_patch copies these methods onto db.BlobProperty itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for a blob property.

    This defaults to a forms.FileField instance when using Django 0.97
    or later. For 0.96 this returns None, as file uploads are not
    really supported in that version.
    """
    if not hasattr(forms, 'FileField'):
      return None
    defaults = {'form_class': forms.FileField}
    defaults.update(kwargs)
    return super(BlobProperty, self).get_form_field(**defaults)

  def get_value_for_form(self, instance):
    """Extract the property value from the instance for use in a form.

    There is no way to convert a Blob into an initial value for a file
    upload, so we always return None.
    """
    return None

  def make_value_from_form(self, value):
    """Convert a form value to a property value.

    This extracts the content from the UploadedFile instance returned
    by the FileField instance.
    """
    # NOTE(review): compares by class *name*, presumably to avoid a hard
    # import of Django's UploadedFile type -- confirm before changing.
    if value.__class__.__name__ == 'UploadedFile':
      return db.Blob(value.content)
    return super(BlobProperty, self).make_value_from_form(value)
class DateTimeProperty(db.DateTimeProperty):
  # monkey_patch copies this method onto db.DateTimeProperty itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for a date-time property.

    This defaults to a DateTimeField instance, except if auto_now or
    auto_now_add is set, in which case None is returned, as such
    'auto' fields should not be rendered as part of the form.
    """
    if self.auto_now or self.auto_now_add:
      return None
    defaults = {'form_class': forms.DateTimeField}
    defaults.update(kwargs)
    return super(DateTimeProperty, self).get_form_field(**defaults)
class DateProperty(db.DateProperty):
  # monkey_patch copies this method onto db.DateProperty itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for a date property.

    This defaults to a DateField instance, except if auto_now or
    auto_now_add is set, in which case None is returned, as such
    'auto' fields should not be rendered as part of the form.
    """
    if self.auto_now or self.auto_now_add:
      return None
    defaults = {'form_class': forms.DateField}
    defaults.update(kwargs)
    return super(DateProperty, self).get_form_field(**defaults)
class TimeProperty(db.TimeProperty):
  # monkey_patch copies this method onto db.TimeProperty itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for a time property.

    This defaults to a TimeField instance, except if auto_now or
    auto_now_add is set, in which case None is returned, as such
    'auto' fields should not be rendered as part of the form.
    """
    if self.auto_now or self.auto_now_add:
      return None
    defaults = {'form_class': forms.TimeField}
    defaults.update(kwargs)
    return super(TimeProperty, self).get_form_field(**defaults)
class IntegerProperty(db.IntegerProperty):
  # monkey_patch copies this method onto db.IntegerProperty itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for an integer property.

    Defaults to a forms.IntegerField instance unless overridden by kwargs.
    """
    options = dict(form_class=forms.IntegerField)
    options.update(kwargs)
    return super(IntegerProperty, self).get_form_field(**options)
class FloatProperty(db.FloatProperty):
  # monkey_patch copies this method onto db.FloatProperty itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for a float property.

    This defaults to a FloatField instance when using Django 0.97 or
    later. For 0.96 this defaults to the CharField class.
    """
    defaults = {}
    # Django 0.96 has no FloatField; fall through to the CharField default.
    if hasattr(forms, 'FloatField'):
      defaults['form_class'] = forms.FloatField
    defaults.update(kwargs)
    return super(FloatProperty, self).get_form_field(**defaults)
class BooleanProperty(db.BooleanProperty):
  # monkey_patch copies these methods onto db.BooleanProperty itself.
  __metaclass__ = monkey_patch

  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for a boolean property.

    This defaults to a BooleanField.
    """
    defaults = {'form_class': forms.BooleanField}
    defaults.update(kwargs)
    return super(BooleanProperty, self).get_form_field(**defaults)

  def make_value_from_form(self, value):
    """Convert a form value to a property value.

    This is needed to ensure that False is not replaced with None.
    """
    if value is None:
      return None
    # a literal 'false' string (any case) maps to False so that
    # string-posted booleans round-trip correctly
    if isinstance(value, basestring) and value.lower() == 'false':
      return False
    return bool(value)
class StringListProperty(db.StringListProperty):
    __metaclass__ = monkey_patch

    def get_form_field(self, **kwargs):
        """Return a Django form field appropriate for a StringList property.

        Defaults to a Textarea widget with a blank initial value; each line
        of the textarea corresponds to one list element.
        """
        options = dict(widget=forms.Textarea, initial='')
        options.update(kwargs)
        return super(StringListProperty, self).get_form_field(**options)

    def get_value_for_form(self, instance):
        """Extract the property value from the instance for use in a form.

        A list of strings is joined with newlines for display; an empty
        value is rendered as None.
        """
        value = super(StringListProperty, self).get_value_for_form(instance)
        if not value:
            return None
        return '\n'.join(value) if isinstance(value, list) else value

    def make_value_from_form(self, value):
        """Convert a form value to a property value.

        The textarea contents are split back into one string per line.
        """
        if not value:
            return []
        return value.splitlines() if isinstance(value, basestring) else value
class LinkProperty(db.LinkProperty):
    __metaclass__ = monkey_patch

    def get_form_field(self, **kwargs):
        """Return a Django form field appropriate for a URL property.

        Unless overridden through kwargs, a URLField instance is used.
        """
        options = dict(kwargs)
        options.setdefault('form_class', forms.URLField)
        return super(LinkProperty, self).get_form_field(**options)
class _WrapIter(object):
"""Helper class whose iter() calls a given function to get an iterator."""
def __init__(self, function):
self._function = function
def __iter__(self):
return self._function()
class ModelChoiceField(forms.Field):
    # Error message used when a submitted key does not resolve to an entity;
    # message key follows Django's ChoiceField convention.
    default_error_messages = {
        'invalid_choice': _(u'Please select a valid choice. '
                            u'That choice is not one of the available choices.'),
    }

    def __init__(self, reference_class, query=None, choices=None,
                 empty_label=u'---------',
                 required=True, widget=forms.Select, label=None, initial=None,
                 help_text=None, *args, **kwargs):
        """Constructor.

        Args:
          reference_class: required; the db.Model subclass used in the reference
          query: optional db.Query; default db.Query(reference_class)
          choices: optional explicit list of (value, label) pairs representing
            available choices; defaults to dynamically iterating over the
            query argument (or its default)
          empty_label: label to be used for the default selection item in
            the widget; this is prepended to the choices
          required, widget, label, initial, help_text, *args, **kwargs:
            like for forms.Field.__init__(); widget defaults to forms.Select
        """
        assert issubclass(reference_class, db.Model)
        if query is None:
            query = db.Query(reference_class)
        assert isinstance(query, db.Query)
        super(ModelChoiceField, self).__init__(required, widget, label, initial,
                                               help_text, *args, **kwargs)
        self.empty_label = empty_label
        self.reference_class = reference_class
        # Assign the private attributes directly (not via the properties
        # below) and sync the widget's choices once at the end.
        self._query = query
        self._choices = choices
        self._update_widget_choices()

    def _update_widget_choices(self):
        """Helper to copy the choices to the widget."""
        self.widget.choices = self.choices

    def _get_query(self):
        """Getter for the query attribute."""
        return self._query

    def _set_query(self, query):
        """Setter for the query attribute.

        As a side effect, the widget's choices are updated.
        """
        self._query = query
        self._update_widget_choices()

    query = property(_get_query, _set_query)

    def _generate_choices(self):
        """Generator yielding (key, label) pairs from the query results.

        The empty-label entry (with an empty-string value) is always
        yielded first so the widget renders a default selection item.
        """
        yield ('', self.empty_label)
        for inst in self._query:
            yield (inst.key(), unicode(inst))

    def _get_choices(self):
        """Getter for the choices attribute.

        This is required to return an object that can be iterated over
        multiple times.
        """
        if self._choices is not None:
            return self._choices
        # _WrapIter restarts the generator on every iteration attempt.
        return _WrapIter(self._generate_choices)

    def _set_choices(self, choices):
        """Setter for the choices attribute.

        As a side effect, the widget's choices are updated.
        """
        self._choices = choices
        self._update_widget_choices()

    choices = property(_get_choices, _set_choices)

    def clean(self, value):
        """Override Field.clean() to do reference-specific value cleaning.

        This turns a non-empty value into a model instance.
        """
        value = super(ModelChoiceField, self).clean(value)
        if not value:
            return None
        instance = db.get(value)
        if instance is None:
            # NOTE(review): this raises db.BadValueError rather than
            # forms.ValidationError; confirm downstream callers translate
            # it so Django's error rendering still works.
            raise db.BadValueError(self.error_messages['invalid_choice'])
        return instance
class ReferenceProperty(db.ReferenceProperty):
    __metaclass__ = monkey_patch

    def get_form_field(self, **kwargs):
        """Return a Django form field appropriate for a reference property.

        Defaults to a ModelChoiceField bound to this property's
        reference_class.
        """
        options = dict(form_class=ModelChoiceField,
                       reference_class=self.reference_class)
        options.update(kwargs)
        return super(ReferenceProperty, self).get_form_field(**options)

    def get_value_for_form(self, instance):
        """Extract the property value from the instance for use in a form.

        Returns the key object of the referenced entity, or None if unset.
        """
        referenced = super(ReferenceProperty, self).get_value_for_form(instance)
        return referenced.key() if referenced is not None else None

    def make_value_from_form(self, value):
        """Convert a form value to a property value.

        A key string or Key object is resolved to a model instance;
        instances and falsy values pass through unchanged.
        """
        if value and not isinstance(value, db.Model):
            value = db.get(value)
        return value
class _ReverseReferenceProperty(db._ReverseReferenceProperty):
    __metaclass__ = monkey_patch

    def get_form_field(self, **kwargs):
        """Return a Django form field appropriate for a reverse reference.

        Reverse references are maintained automatically by the datastore
        layer, so no form field is ever rendered for them: the return
        value is always None, regardless of kwargs.
        """
        return None
def property_clean(prop, value):
"""Apply Property level validation to value.
Calls .make_value_from_form() and .validate() on the property and catches
exceptions generated by either. The exceptions are converted to
forms.ValidationError exceptions.
Args:
prop: The property to validate against.
value: The value to validate.
Raises:
forms.ValidationError if the value cannot be validated.
"""
if value is not None:
try:
prop.validate(prop.make_value_from_form(value))
except (db.BadValueError, ValueError), e:
raise forms.ValidationError(unicode(e))
class ModelFormOptions(object):
    """A simple class to hold internal options for a ModelForm class.

    Instance attributes (all copied from the nested 'Meta' class of a
    ModelForm, and all defaulting to None):

      model: a db.Model class, or None
      fields: list of field names to be defined, or None
      exclude: list of field names to be skipped, or None
    """

    def __init__(self, options=None):
        for attr in ('model', 'fields', 'exclude'):
            setattr(self, attr, getattr(options, attr, None))
class ModelFormMetaclass(type):
    """The metaclass for the ModelForm class defined below.

    This is our analog of Django's own ModelFormMetaclass. (We
    can't conveniently subclass that class because there are quite a few
    differences.)

    See the docs for ModelForm below for a usage example.
    """

    def __new__(cls, class_name, bases, attrs):
        """Constructor for a new ModelForm class instance.

        The signature of this method is determined by Python internals.

        All Django Field instances are removed from attrs and added to
        the base_fields attribute instead. Additional Field instances
        are added to this based on the Datastore Model class specified
        by the Meta attribute.
        """
        # Pop explicitly declared Field instances out of the class body,
        # ordered by creation_counter (i.e. source declaration order).
        fields = sorted(((field_name, attrs.pop(field_name))
                         for field_name, obj in attrs.items()
                         if isinstance(obj, forms.Field)),
                        key=lambda obj: obj[1].creation_counter)
        # Walk bases in reverse so fields inherited from earlier bases
        # come before the fields declared on this class.
        for base in bases[::-1]:
            if hasattr(base, 'base_fields'):
                fields = base.base_fields.items() + fields
        declared_fields = django.utils.datastructures.SortedDict()
        for field_name, obj in fields:
            declared_fields[field_name] = obj
        opts = ModelFormOptions(attrs.get('Meta', None))
        attrs['_meta'] = opts
        # A ModelForm may inherit its model from at most one base class.
        base_models = []
        for base in bases:
            base_opts = getattr(base, '_meta', None)
            base_model = getattr(base_opts, 'model', None)
            if base_model is not None:
                base_models.append(base_model)
        if len(base_models) > 1:
            raise django.core.exceptions.ImproperlyConfigured(
                "%s's base classes define more than one model." % class_name)
        if opts.model is not None:
            if base_models and base_models[0] is not opts.model:
                raise django.core.exceptions.ImproperlyConfigured(
                    '%s defines a different model than its parent.' % class_name)
            # Derive form fields from the model's properties, honoring
            # Meta.fields / Meta.exclude; properties whose get_form_field()
            # returns None (e.g. 'auto' date/time fields) are skipped.
            model_fields = django.utils.datastructures.SortedDict()
            for name, prop in sorted(opts.model.properties().iteritems(),
                                     key=lambda prop: prop[1].creation_counter):
                if opts.fields and name not in opts.fields:
                    continue
                if opts.exclude and name in opts.exclude:
                    continue
                form_field = prop.get_form_field()
                if form_field is not None:
                    model_fields[name] = form_field
            # Explicitly declared fields override model-derived ones.
            model_fields.update(declared_fields)
            attrs['base_fields'] = model_fields
            # Wrap each field's clean() so Property-level validation runs
            # after form-level cleaning. prop/old_clean are bound through
            # default arguments to avoid the late-binding closure pitfall.
            props = opts.model.properties()
            for name, field in model_fields.iteritems():
                prop = props.get(name)
                if prop:
                    def clean_for_property_field(value, prop=prop, old_clean=field.clean):
                        value = old_clean(value)
                        property_clean(prop, value)
                        return value
                    field.clean = clean_for_property_field
        else:
            attrs['base_fields'] = declared_fields
        return super(ModelFormMetaclass, cls).__new__(cls,
                                                      class_name, bases, attrs)
class BaseModelForm(forms.BaseForm):
    """Base class for ModelForm.

    This overrides the forms.BaseForm constructor and adds a save() method.

    This class does not have a special metaclass; the magic metaclass is
    added by the subclass ModelForm.
    """

    def __init__(self, data=None, files=None, auto_id=None, prefix=None,
                 initial=None, error_class=None, label_suffix=None,
                 instance=None):
        """Constructor.

        Args (all optional and defaulting to None):
          data: dict of data values, typically from a POST request)
          files: dict of file upload values; Django 0.97 or later only
          auto_id, prefix: see Django documentation
          initial: dict of initial values
          error_class, label_suffix: see Django 0.97 or later documentation
          instance: Model instance to be used for additional initial values

        Except for initial and instance, these arguments are passed on to
        the forms.BaseForm constructor unchanged, but only if not None.
        Some arguments (files, error_class, label_suffix) are only
        supported by Django 0.97 or later. Leave these blank (i.e. None)
        when using Django 0.96. Their default values will be used with
        Django 0.97 or later even when they are explicitly set to None.
        """
        opts = self._meta
        self.instance = instance
        object_data = {}
        if instance is not None:
            # Seed initial form values from the instance's properties,
            # honoring Meta.fields / Meta.exclude.
            for name, prop in instance.properties().iteritems():
                if opts.fields and name not in opts.fields:
                    continue
                if opts.exclude and name in opts.exclude:
                    continue
                object_data[name] = prop.get_value_for_form(instance)
        if initial is not None:
            # Caller-supplied initial values take precedence over the
            # instance-derived ones.
            object_data.update(initial)
        kwargs = dict(data=data, files=files, auto_id=auto_id,
                      prefix=prefix, initial=object_data,
                      error_class=error_class, label_suffix=label_suffix)
        # Drop None entries so Django 0.96 (which lacks some of these
        # keywords) still accepts the call, and 0.97+ defaults apply.
        kwargs = dict((name, value)
                      for name, value in kwargs.iteritems()
                      if value is not None)
        super(BaseModelForm, self).__init__(**kwargs)

    def save(self, commit=True):
        """Save this form's cleaned data into a model instance.

        Args:
          commit: optional bool, default True; if true, the model instance
            is also saved to the datastore.

        Returns:
          A model instance. If a model instance was already associated
          with this form instance (either passed to the constructor with
          instance=... or by a previous save() call), that same instance
          is updated and returned; if no instance was associated yet, one
          is created by this call.

        Raises:
          ValueError if the data couldn't be validated.
        """
        if not self.is_bound:
            raise ValueError('Cannot save an unbound form')
        opts = self._meta
        instance = self.instance
        if instance is None:
            fail_message = 'created'
        else:
            fail_message = 'updated'
        if self.errors:
            raise ValueError("The %s could not be %s because the data didn't "
                             'validate.' % (opts.model.kind(), fail_message))
        cleaned_data = self._cleaned_data()
        converted_data = {}
        # Include the pseudo-property 'key_name' so a named key can be
        # supplied when the entity is first created.
        propiter = itertools.chain(
            opts.model.properties().iteritems(),
            iter([('key_name', StringProperty(name='key_name'))])
        )
        for name, prop in propiter:
            value = cleaned_data.get(name)
            if value is not None:
                converted_data[name] = prop.make_value_from_form(value)
        try:
            if instance is None:
                instance = opts.model(**converted_data)
                self.instance = instance
            else:
                for name, value in converted_data.iteritems():
                    if name == 'key_name':
                        # The key cannot be changed on an existing entity.
                        continue
                    setattr(instance, name, value)
        except db.BadValueError, err:
            raise ValueError('The %s could not be %s (%s)' %
                             (opts.model.kind(), fail_message, err))
        if commit:
            instance.put()
        return instance

    def _cleaned_data(self):
        """Helper to retrieve the cleaned data attribute.

        In Django 0.96 this attribute was called self.clean_data. In 0.97
        and later it's been renamed to self.cleaned_data, to avoid a name
        conflict. This helper abstracts the difference between the
        versions away from its caller.
        """
        try:
            return self.cleaned_data
        except AttributeError:
            return self.clean_data
class ModelForm(BaseModelForm):
    """A Django form tied to a Datastore model.

    Note that this particular class just sets the metaclass; all other
    functionality is defined in the base class, BaseModelForm, above.

    Usage example:

      from google.appengine.ext import db
      from google.appengine.ext.db import djangoforms

      # First, define a model class
      class MyModel(db.Model):
        foo = db.StringProperty()
        bar = db.IntegerProperty(required=True, default=42)

      # Now define a form class
      class MyForm(djangoforms.ModelForm):
        class Meta:
          model = MyModel

    You can now instantiate MyForm without arguments to create an
    unbound form, or with data from a POST request to create a bound
    form. You can also pass a model instance with the instance=...
    keyword argument to create an unbound (!) form whose initial values
    are taken from the instance. For bound forms, use the save() method
    to return a model instance.

    Like Django's own corresponding ModelForm class, the nested Meta
    class can have two other attributes:

      fields: if present and non-empty, a list of field names to be
        included in the form; properties not listed here are
        excluded from the form
      exclude: if present and non-empty, a list of field names to be
        excluded from the form

    If exclude and fields are both non-empty, names occurring in both
    are excluded (i.e. exclude wins). By default all property in the
    model have a corresponding form field defined.

    It is also possible to define form fields explicitly. This gives
    more control over the widget used, constraints, initial value, and
    so on. Such form fields are not affected by the nested Meta class's
    fields and exclude attributes.

    If you define a form field named 'key_name' it will be treated
    specially and will be used as the value for the key_name parameter
    to the Model constructor. This allows you to create instances with
    named keys. The 'key_name' field will be ignored when updating an
    instance (although it will still be shown on the form).
    """
    # The metaclass consumes the nested Meta class and synthesizes
    # base_fields from the model's properties; see ModelFormMetaclass.
    __metaclass__ = ModelFormMetaclass
| 34.514092 | 80 | 0.653459 |
7914383ad27820dbe2280658d4c8903994d2f2c4 | 2,623 | py | Python | scripts/readme.py | abdullahzamanbabar/syntribos | 2d0a6344fe14c8edc6c5c1eba7adbedc154ff579 | [
"Apache-2.0"
] | 277 | 2015-09-23T22:55:16.000Z | 2020-05-17T18:45:46.000Z | scripts/readme.py | abdullahzamanbabar/syntribos | 2d0a6344fe14c8edc6c5c1eba7adbedc154ff579 | [
"Apache-2.0"
] | null | null | null | scripts/readme.py | abdullahzamanbabar/syntribos | 2d0a6344fe14c8edc6c5c1eba7adbedc154ff579 | [
"Apache-2.0"
] | 72 | 2016-01-04T18:57:06.000Z | 2020-05-07T14:07:30.000Z | #!/usr/bin/env python
# Copyright 2016 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# ReStructuredText badge/header block written verbatim at the top of the
# generated README.rst by concat_docs(). NOTE: this is runtime data (it is
# emitted into the output file), not documentation for this module.
repository_tags = """
========================
Team and repository tags
========================
.. image:: https://governance.openstack.org/tc/badges/syntribos.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
.. image:: https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat
:target: https://docs.openstack.org/syntribos/latest/
.. image:: https://img.shields.io/pypi/v/syntribos.svg
:target: https://pypi.python.org/pypi/syntribos/
.. image:: https://img.shields.io/pypi/pyversions/syntribos.svg
:target: https://pypi.python.org/pypi/syntribos/
.. image:: https://img.shields.io/pypi/wheel/syntribos.svg
:target: https://pypi.python.org/pypi/syntribos/
.. image:: https://img.shields.io/irc/%23openstack-security.png
:target: https://webchat.freenode.net/?channels=openstack-security
"""
def find_docs():
    """Yield the whitelisted doc-source paths that exist on disk.

    Paths are produced in whitelist order; entries whose .rst file is
    missing under ../doc/source/ are silently skipped.
    """
    template = "../doc/source/{}.rst"
    whitelist = (
        "about", "installation",
        "configuration", "commands",
        "running", "logging",
        "test-anatomy", "unittests",
        "contributing",
    )
    for name in whitelist:
        path = template.format(name)
        if os.path.isfile(path):
            yield path
def concat_docs():
    """Concatenate the files yielded by `find_docs` into ../README.rst.

    The README is overwritten, starting with `repository_tags` and
    followed by the contents of each whitelisted doc file, each followed
    by a blank line. Exits with status 1 if ../README.rst does not
    already exist (the file is only regenerated, never created).
    """
    import sys  # local import keeps the module's import block unchanged

    file_path = os.path.dirname(os.path.realpath(__file__))
    head, tail = os.path.split(file_path)
    outfile = head + "/README.rst"
    if not os.path.isfile(outfile):
        print("../README.rst not found, exiting!")
        # sys.exit is always available; the bare exit() builtin is only
        # provided when the site module is loaded.
        sys.exit(1)
    with open(outfile, 'w') as readme_handle:
        readme_handle.write(repository_tags)
        for doc in find_docs():
            with open(doc, 'r') as doc_handle:
                for line in doc_handle:
                    readme_handle.write(line)
            readme_handle.write("\n")
if __name__ == '__main__':
    # Script entry point: regenerate README.rst from the doc sources.
    """Generate README.rst from docs."""
    concat_docs()
    print("\nREADME.rst created!\n")
| 31.987805 | 78 | 0.65345 |
d3236f61b5c909a7e161434a1a1ebe6ca04e13a8 | 12,575 | py | Python | ryu/services/protocols/bgp/application.py | jil7/ryu | 03c67d368dfa19bba6f070b060fb15aace4dd703 | [
"Apache-2.0"
] | 9 | 2018-04-11T12:53:08.000Z | 2021-12-14T01:41:22.000Z | ryu/services/protocols/bgp/application.py | jil7/ryu | 03c67d368dfa19bba6f070b060fb15aace4dd703 | [
"Apache-2.0"
] | 1 | 2019-05-20T13:23:28.000Z | 2020-12-20T09:06:52.000Z | ryu/services/protocols/bgp/application.py | jil7/ryu | 03c67d368dfa19bba6f070b060fb15aace4dd703 | [
"Apache-2.0"
] | 2 | 2020-10-20T13:52:45.000Z | 2021-06-26T02:21:58.000Z | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines bases classes to create a BGP application.
"""
import logging
import os
from ryu import cfg
from ryu.lib import hub
from ryu.utils import load_source
from ryu.base.app_manager import RyuApp
from ryu.controller.event import EventBase
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import BGPSException
from ryu.services.protocols.bgp.base import BIN_ERROR
from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker
from ryu.services.protocols.bgp.net_ctrl import NET_CONTROLLER
from ryu.services.protocols.bgp.net_ctrl import NC_RPC_BIND_IP
from ryu.services.protocols.bgp.net_ctrl import NC_RPC_BIND_PORT
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import (
DEFAULT_REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_STALEPATH_TIME)
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS
from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME
from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME
from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID
from ryu.services.protocols.bgp.rtconf.common import LOCAL_PREF
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LOCAL_PREF
from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
from ryu.services.protocols.bgp.utils.validation import is_valid_ipv6
# Module-level logger for the BGP application wrapper.
LOG = logging.getLogger('bgpspeaker.application')
# Option group for the [bgp-app] section; provides config_file, rpc_host
# and rpc_port, which are read when the app starts.
CONF = cfg.CONF['bgp-app']
@add_bgp_error_metadata(code=BIN_ERROR,
                        sub_code=1,
                        def_desc='Unknown bootstrap exception.')
class ApplicationException(BGPSException):
    """Base exception for errors raised while bootstrapping `BGPSpeaker`.

    BGP error metadata (code=BIN_ERROR, sub_code=1) is attached by the
    class decorator above.
    """
    pass
def validate_rpc_host(ip):
    """Validate the given address for use as the RPC server address.

    Returns the address unchanged when it is a valid IPv4 or IPv6
    address; otherwise raises ApplicationException.
    """
    if is_valid_ipv4(ip) or is_valid_ipv6(ip):
        return ip
    raise ApplicationException(
        desc='Invalid RPC ip address: %s' % ip)
def load_config(config_file):
    """Load the BGPSpeaker settings file as a module instance.

    The path must name an existing regular file; any failure (missing
    file or import error) is reported as ApplicationException.
    """
    if not config_file or not os.path.isfile(config_file):
        raise ApplicationException(
            desc='Invalid configuration file: %s' % config_file)
    # Import the settings file so its top-level names become attributes
    # of the returned module object.
    try:
        return load_source('bgpspeaker.application.settings', config_file)
    except Exception as e:
        raise ApplicationException(desc=str(e))
class EventBestPathChanged(EventBase):
    """
    Event called when any best remote path is changed due to UPDATE messages
    or remote peer's down.

    This event is the wrapper for ``best_path_change_handler`` of
    ``bgpspeaker.BGPSpeaker``.

    ``path`` attribute contains an instance of ``info_base.base.Path``
    subclasses.

    If ``is_withdraw`` attribute is ``True``, ``path`` attribute has the
    information of the withdraw route.
    """

    def __init__(self, path, is_withdraw):
        super(EventBestPathChanged, self).__init__()
        # Path instance carried by this notification.
        self.path = path
        # True when the notification describes a withdrawn route.
        self.is_withdraw = is_withdraw
class EventPeerDown(EventBase):
    """
    Event called when the session to the remote peer goes down.

    This event is the wrapper for ``peer_down_handler`` of
    ``bgpspeaker.BGPSpeaker``.

    ``remote_ip`` attribute is the IP address of the remote peer.

    ``remote_as`` attribute is the AS number of the remote peer.
    """

    def __init__(self, remote_ip, remote_as):
        super(EventPeerDown, self).__init__()
        # IP address of the peer whose session went down.
        self.remote_ip = remote_ip
        # AS number of that peer.
        self.remote_as = remote_as
class EventPeerUp(EventBase):
    """
    Event called when the session to the remote peer goes up.

    This event is the wrapper for ``peer_up_handler`` of
    ``bgpspeaker.BGPSpeaker``.

    ``remote_ip`` attribute is the IP address of the remote peer.

    ``remote_as`` attribute is the AS number of the remote peer.
    """

    def __init__(self, remote_ip, remote_as):
        super(EventPeerUp, self).__init__()
        # IP address of the peer whose session came up.
        self.remote_ip = remote_ip
        # AS number of that peer.
        self.remote_as = remote_as
class RyuBGPSpeaker(RyuApp):
    """
    Base application for implementing BGP applications.

    This application notifies
    - ``EventBestPathChanged``
    - ``EventPeerDown``
    - ``EventPeerUp``
    to other BGP applications.
    To catch these events, specify ``@set_ev_cls()`` decorator to the event
    handlers in the Ryu applications.

    Example::

        ...
        from ryu.base import app_manager
        from ryu.controller.handler import set_ev_cls
        from ryu.services.protocols.bgp import application as bgp_application
        ...

        class MyBGPApp(app_manager.RyuApp):
            _CONTEXTS = {
                'ryubgpspeaker': bgp_application.RyuBGPSpeaker,
            }
            ...

            @set_ev_cls(bgp_application.EventBestPathChanged)
            def _best_patch_changed_handler(self, ev):
                self.logger.info(
                    'Best path changed: is_withdraw=%s, path=%s',
                    ev.is_withdraw, ev.path)
    """
    # Events this app may emit to observers.
    _EVENTS = [
        EventBestPathChanged,
        EventPeerDown,
        EventPeerUp,
    ]

    def __init__(self, *args, **kwargs):
        super(RyuBGPSpeaker, self).__init__(*args, **kwargs)
        # Path of the settings file given via --bgp-app-config-file.
        self.config_file = CONF.config_file

        # BGPSpeaker instance (not instantiated yet)
        self.speaker = None

    def start(self):
        """Start the app and, if configured, the BGP speaker and RPC server."""
        super(RyuBGPSpeaker, self).start()

        # If configuration file was provided and loaded successfully, we start
        # BGPSpeaker using the given settings.
        # If no configuration file is provided or if any minimum required
        # setting is missing, BGPSpeaker will not be started.
        if self.config_file:
            LOG.debug('Loading config file %s...', self.config_file)
            settings = load_config(self.config_file)

            # Configure logging settings, if available.
            if hasattr(settings, 'LOGGING'):
                # Not implemented yet.
                LOG.debug('Loading LOGGING settings... (NOT implemented yet)')
                # from logging.config import dictConfig
                # logging_settings = dictConfig(settings.LOGGING)

            # Configure BGP settings, if available.
            if hasattr(settings, 'BGP'):
                LOG.debug('Loading BGP settings...')
                self._start_speaker(settings.BGP)

            # Configure SSH settings, if available.
            if hasattr(settings, 'SSH'):
                LOG.debug('Loading SSH settings...')
                # Note: paramiko used in bgp.operator.ssh is the optional
                # requirements, imports bgp.operator.ssh here.
                from ryu.services.protocols.bgp.operator import ssh
                hub.spawn(ssh.SSH_CLI_CONTROLLER.start, **settings.SSH)

        # Start RPC server with the given RPC settings.
        rpc_settings = {
            NC_RPC_BIND_PORT: CONF.rpc_port,
            NC_RPC_BIND_IP: validate_rpc_host(CONF.rpc_host),
        }
        return hub.spawn(NET_CONTROLLER.start, **rpc_settings)

    def _start_speaker(self, settings):
        """
        Starts BGPSpeaker using the given settings.

        Raises ApplicationException when a required setting (local AS
        number or router ID) is missing.
        """
        # Settings for starting BGPSpeaker
        bgp_settings = {}

        # Get required settings.
        # NOTE: subscript access (not dict.get) is required here -- .get()
        # never raises KeyError, which previously made this error path dead
        # and let missing required settings pass through as None.
        try:
            bgp_settings['as_number'] = settings[LOCAL_AS]
            bgp_settings['router_id'] = settings[ROUTER_ID]
        except KeyError as e:
            raise ApplicationException(
                desc='Required BGP configuration missing: %s' % e)

        # Set event notify handlers if no corresponding handler specified.
        bgp_settings['best_path_change_handler'] = settings.get(
            'best_path_change_handler', self._notify_best_path_changed_event)
        bgp_settings['peer_down_handler'] = settings.get(
            'peer_down_handler', self._notify_peer_down_event)
        bgp_settings['peer_up_handler'] = settings.get(
            'peer_up_handler', self._notify_peer_up_event)

        # Get optional settings.
        bgp_settings[BGP_SERVER_PORT] = settings.get(
            BGP_SERVER_PORT, DEFAULT_BGP_SERVER_PORT)
        bgp_settings[REFRESH_STALEPATH_TIME] = settings.get(
            REFRESH_STALEPATH_TIME, DEFAULT_REFRESH_STALEPATH_TIME)
        bgp_settings[REFRESH_MAX_EOR_TIME] = settings.get(
            REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_MAX_EOR_TIME)
        bgp_settings[LABEL_RANGE] = settings.get(
            LABEL_RANGE, DEFAULT_LABEL_RANGE)
        bgp_settings['allow_local_as_in_count'] = settings.get(
            'allow_local_as_in_count', 0)
        bgp_settings[LOCAL_PREF] = settings.get(
            LOCAL_PREF, DEFAULT_LOCAL_PREF)

        # Create BGPSpeaker instance.
        LOG.debug('Starting BGPSpeaker...')
        self.speaker = BGPSpeaker(**bgp_settings)

        # Add neighbors.
        LOG.debug('Adding neighbors...')
        self._add_neighbors(settings.get('neighbors', []))

        # Add VRFs.
        LOG.debug('Adding VRFs...')
        self._add_vrfs(settings.get('vrfs', []))

        # Add Networks
        LOG.debug('Adding routes...')
        self._add_routes(settings.get('routes', []))

    def _notify_best_path_changed_event(self, ev):
        # Re-wrap the speaker-level event and broadcast it to observers.
        ev = EventBestPathChanged(ev.path, ev.is_withdraw)
        self.send_event_to_observers(ev)

    def _notify_peer_down_event(self, remote_ip, remote_as):
        ev = EventPeerDown(remote_ip, remote_as)
        self.send_event_to_observers(ev)

    def _notify_peer_up_event(self, remote_ip, remote_as):
        ev = EventPeerUp(remote_ip, remote_as)
        self.send_event_to_observers(ev)

    def _add_neighbors(self, settings):
        """
        Add BGP neighbors from the given settings.

        All valid neighbors are loaded.
        Miss-configured neighbors are ignored and errors are logged.
        """
        for neighbor_settings in settings:
            LOG.debug('Adding neighbor settings: %s', neighbor_settings)
            try:
                self.speaker.neighbor_add(**neighbor_settings)
            except RuntimeConfigError as e:
                LOG.exception(e)

    def _add_vrfs(self, settings):
        """
        Add BGP VRFs from the given settings.

        All valid VRFs are loaded.
        Miss-configured VRFs are ignored and errors are logged.
        """
        for vrf_settings in settings:
            LOG.debug('Adding VRF settings: %s', vrf_settings)
            try:
                self.speaker.vrf_add(**vrf_settings)
            except RuntimeConfigError as e:
                LOG.exception(e)

    def _add_routes(self, settings):
        """
        Add BGP routes from given settings.

        All valid routes are loaded.
        Miss-configured routes are ignored and errors are logged.
        """
        for route_settings in settings:
            # Dispatch on which kind of route this settings dict describes.
            if 'prefix' in route_settings:
                prefix_add = self.speaker.prefix_add
            elif 'route_type' in route_settings:
                prefix_add = self.speaker.evpn_prefix_add
            elif 'flowspec_family' in route_settings:
                prefix_add = self.speaker.flowspec_prefix_add
            else:
                LOG.debug('Skip invalid route settings: %s', route_settings)
                continue

            LOG.debug('Adding route settings: %s', route_settings)
            try:
                prefix_add(**route_settings)
            except RuntimeConfigError as e:
                LOG.exception(e)
| 36.031519 | 78 | 0.668628 |
fe3532e34684f36e33d0ca8bdf0687e250f8c86b | 4,070 | py | Python | qcodes/tests/drivers/test_keysight_34934a.py | LGruenhaupt/Qcodes | ffb74dae53c13c4885e61b5a2df3f833d524de04 | [
"MIT"
] | 223 | 2016-10-29T15:00:24.000Z | 2022-03-20T06:53:34.000Z | qcodes/tests/drivers/test_keysight_34934a.py | LGruenhaupt/Qcodes | ffb74dae53c13c4885e61b5a2df3f833d524de04 | [
"MIT"
] | 3,406 | 2016-10-25T10:44:50.000Z | 2022-03-31T09:47:35.000Z | qcodes/tests/drivers/test_keysight_34934a.py | nikhartman/Qcodes | 042c5e25ab9e40b20c316b4055c4842844834d1e | [
"MIT"
] | 263 | 2016-10-25T11:35:36.000Z | 2022-03-31T08:53:20.000Z | # pylint: disable=redefined-outer-name
import pytest
from hypothesis import given
import hypothesis.strategies as st
from qcodes.instrument_drivers.Keysight.keysight_34980a import Keysight34980A
from qcodes.instrument_drivers.Keysight.keysight_34934a import Keysight34934A
import qcodes.instrument.sims as sims
VISALIB = sims.__file__.replace('__init__.py', 'keysight_34980A.yaml@sim')
@pytest.fixture(scope="module")
def switch_driver():
    # Simulated 34980A instrument (pyvisa-sim backend) shared by all tests
    # in this module; closed once the module's tests have finished.
    inst = Keysight34980A('keysight_34980A_sim',
                          address='GPIB::1::INSTR',
                          visalib=VISALIB)
    try:
        yield inst
    finally:
        inst.close()
def test_protection_mode(switch_driver):
    """Verify the relay protection mode reported by the module in slot 1
    (34934A module only)."""
    mode = switch_driver.module[1].protection_mode()
    assert mode == 'AUTO100'
def test_connection(switch_driver):
    """Verify that channel (2, 3) on the module in slot 1 reports itself
    as open, via both the is_open and is_closed queries."""
    module = switch_driver.module[1]
    assert module.is_open(2, 3)
    assert not module.is_closed(2, 3)
# The following is to test the numbering function for the module 34934A
# the 'g' functions are copied from the table on P168 of the 34934A User's Guide
# the 'f' function is a simplified version, see the keysight34934A class for
# detail
@given(
    st.sampled_from(("M1H", "M1L", "M2H", "M2L")),
    st.integers(1, 4),
    st.integers(1, 32)
)
def test_4x32(config, row, column):
    """Driver numbering must match the user's-guide reference for 4x32."""
    actual = Keysight34934A.get_numbering_function(4, 32, config)
    expected = numbering_function_4x32(config)
    assert actual(row, column) == expected(row, column)
@given(
    st.sampled_from(("MH", "ML")),
    st.integers(1, 4),
    st.integers(1, 64)
)
def test_4x64(config, row, column):
    """Driver numbering must match the user's-guide reference for 4x64."""
    actual = Keysight34934A.get_numbering_function(4, 64, config)
    expected = numbering_function_4x64(config)
    assert actual(row, column) == expected(row, column)
@given(
    st.integers(1, 4),
    st.integers(1, 128)
)
def test_4x128(row, column):
    """Driver numbering must match the user's-guide reference for 4x128."""
    actual = Keysight34934A.get_numbering_function(4, 128)
    expected = numbering_function_4x128()
    assert actual(row, column) == expected(row, column)
@given(
    st.sampled_from(("MH", "ML")),
    st.integers(1, 8),
    st.integers(1, 32)
)
def test_8x32(config, row, column):
    """Driver numbering must match the user's-guide reference for 8x32."""
    actual = Keysight34934A.get_numbering_function(8, 32, config)
    expected = numbering_function_8x32(config)
    assert actual(row, column) == expected(row, column)
@given(
    st.integers(1, 8),
    st.integers(1, 64)
)
def test_8x64(row, column):
    """Driver numbering must match the user's-guide reference for 8x64."""
    actual = Keysight34934A.get_numbering_function(8, 64)
    expected = numbering_function_8x64()
    assert actual(row, column) == expected(row, column)
@given(
    st.integers(1, 16),
    st.integers(1, 32)
)
def test_16x32(row, column):
    """Driver numbering must match the user's-guide reference for 16x32."""
    actual = Keysight34934A.get_numbering_function(16, 32)
    expected = numbering_function_16x32()
    assert actual(row, column) == expected(row, column)
def numbering_function_4x32(wiring_config):
    """Reference channel numbering for the 4x32 matrix (34934A guide, p168)."""
    offset_by_config = {
        "M1H": 0,
        "M2H": 32,
        "M1L": 64,
        "M2L": 96,
    }

    def to_channel(row, col):
        # Rows occupy odd hundreds (100, 300, 500, 700); the wiring config
        # shifts the column within the row. Lookup stays inside the closure
        # so an unknown config only fails when the function is called.
        channel = 100 * (2 * row - 1) + col + offset_by_config[wiring_config]
        return str(int(channel))

    return to_channel
def numbering_function_4x64(wiring_config):
    """Reference channel numbering for the 4x64 matrix (34934A guide, p168)."""
    offset_by_config = {"MH": 0, "ML": 64}

    def to_channel(row, col):
        # Same odd-hundreds row layout as 4x32; only two wiring offsets exist.
        channel = 100 * (2 * row - 1) + col + offset_by_config[wiring_config]
        return str(int(channel))

    return to_channel
def numbering_function_4x128():
    """Reference channel numbering for the 4x128 matrix (no wiring offset)."""
    def to_channel(row, col):
        return str(int(100 * (2 * row - 1) + col))

    return to_channel
def numbering_function_8x32(wiring_config):
    """Reference channel numbering for the 8x32 matrix (34934A guide, p168)."""
    offset_by_config = {"MH": 0, "ML": 32}

    def to_channel(row, col):
        # Eight rows map directly onto hundreds 100..800.
        channel = 100 * row + col + offset_by_config[wiring_config]
        return str(int(channel))

    return to_channel
def numbering_function_8x64():
    """Reference channel numbering for the 8x64 matrix (no wiring offset)."""
    def to_channel(row, col):
        return str(int(100 * row + col))

    return to_channel
def numbering_function_16x32():
    """Reference channel numbering for the 16x32 matrix (34934A guide, p168)."""
    def to_channel(row, col):
        # Sixteen rows are packed at 50-channel spacing starting at 100.
        return str(int(50 * (row + 1) + col))

    return to_channel
| 24.08284 | 80 | 0.649386 |
48e73c10325cabf003bfe13ba74921f282126674 | 3,062 | py | Python | tests/test_xmhw.py | Thomas-Moore-Creative/xmhw | 5c0db575fe0218d5f2c5189b2de85dabecc5c8cf | [
"Apache-2.0"
] | 6 | 2021-10-03T22:15:36.000Z | 2022-03-06T04:01:50.000Z | tests/test_xmhw.py | Thomas-Moore-Creative/xmhw | 5c0db575fe0218d5f2c5189b2de85dabecc5c8cf | [
"Apache-2.0"
] | 17 | 2021-05-28T00:48:59.000Z | 2022-03-29T21:36:09.000Z | tests/test_xmhw.py | Thomas-Moore-Creative/xmhw | 5c0db575fe0218d5f2c5189b2de85dabecc5c8cf | [
"Apache-2.0"
] | 3 | 2021-09-30T06:23:51.000Z | 2022-02-16T12:13:40.000Z | #!/usr/bin/env python
# Copyright 2020 ARC Centre of Excellence for Climate Extremes
# author: Paola Petrelli <paola.petrelli@utas.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xmhw.xmhw import threshold, detect
from xmhw_fixtures import *
from numpy import testing as nptest
from xmhw.exception import XmhwException
def test_threshold(clim_oisst, clim_oisst_nosmooth, oisst_ts):
    """Compare threshold() output against precomputed OISST reference data.

    Covers one invalid-argument case, the unsmoothed-percentile path and the
    default smoothed path, at two grid points each.
    """
    # test exceptions with wrong arguments
    # NOTE(review): presumably smoothPercentileWidth=6 is rejected because the
    # smoothing window must be odd -- confirm against threshold()'s validation.
    with pytest.raises(XmhwException):
        clim = threshold(oisst_ts, smoothPercentileWidth=6)
    # Unsmoothed percentile, NaNs skipped.
    clim = threshold(oisst_ts, smoothPercentile=False, skipna=True)
    # Two reference grid points selected by lat/lon.
    th1 = clim['thresh'].sel(lat=-42.625, lon=148.125)
    seas1 = clim['seas'].sel(lat=-42.625, lon=148.125)
    th2 = clim['thresh'].sel(lat=-41.625, lon=148.375)
    seas2 = clim['seas'].sel(lat=-41.625, lon=148.375)
    #temporarily testing only after mid March so as to avoid the +-2 days from feb29
    nptest.assert_array_almost_equal(clim_oisst_nosmooth.thresh1[60:].values,th1[60:].values)
    nptest.assert_array_almost_equal(clim_oisst_nosmooth.thresh2[60:].values,th2[60:].values)
    nptest.assert_array_almost_equal(clim_oisst_nosmooth.seas1[60:].values,seas1[60:].values, decimal=4)
    nptest.assert_array_almost_equal(clim_oisst_nosmooth.seas2[60:].values,seas2[60:].values, decimal=4)
    # test default smooth True
    clim = threshold(oisst_ts, skipna=True)
    th1 = clim['thresh'].sel(lat=-42.625, lon=148.125)
    seas1 = clim['seas'].sel(lat=-42.625, lon=148.125)
    th2 = clim['thresh'].sel(lat=-41.625, lon=148.375)
    seas2 = clim['seas'].sel(lat=-41.625, lon=148.375)
    #temporarily testing only after mid March so as to avoid the =-15 days from feb29
    nptest.assert_array_almost_equal(clim_oisst.thresh1[82:].values,th1[82:].values)
    nptest.assert_array_almost_equal(clim_oisst.thresh2[82:].values,th2[82:].values)
    nptest.assert_array_almost_equal(clim_oisst.seas1[82:].values,seas1[82:].values, decimal=4)
    nptest.assert_array_almost_equal(clim_oisst.seas2[82:].values,seas2[82:].values, decimal=4)
    # add test with 1-dimensional and/or 2-dimensional arrays to make sure it still works
    # add test with skipna False for this set and one without nans
def test_detect(oisst_ts, clim_oisst):
    """detect() rejects an invalid combination of minDuration and maxGap."""
    # Full signature for reference:
    # detect(temp, thresh, seas, minDuration=5, joinAcrossGaps=True, maxGap=2,
    #        maxPadLength=None, coldSpells=False, tdim='time')
    with pytest.raises(XmhwException):
        _ = detect(oisst_ts, clim_oisst.thresh2, clim_oisst.seas2, minDuration=3, maxGap=5)
| 52.793103 | 124 | 0.743632 |
4854e55c3bf64085accb32d032400f351069e200 | 1,595 | py | Python | setup.py | angryjoe/cookiecutter-django-foundation | 7abcfe253779c69f9d620a78dff826b2ad839977 | [
"BSD-3-Clause"
] | null | null | null | setup.py | angryjoe/cookiecutter-django-foundation | 7abcfe253779c69f9d620a78dff826b2ad839977 | [
"BSD-3-Clause"
] | null | null | null | setup.py | angryjoe/cookiecutter-django-foundation | 7abcfe253779c69f9d620a78dff826b2ad839977 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
try:
    from setuptools import setup
except ImportError:
    # Very old environments without setuptools fall back to distutils.
    from distutils.core import setup

# Our version ALWAYS matches the version of Django we support.
# If Django has a new release, we branch, tag, then update this setting
# after the tag.
version = "2.2.4"

# `python setup.py tag` creates an annotated git tag for the version above
# and pushes it, then exits without installing anything.
if sys.argv[-1] == "tag":
    os.system('git tag -a %s -m "version %s"' % (version, version))
    os.system("git push --tags")
    sys.exit()

with open("README.rst") as fh:
    long_description = fh.read()

CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Framework :: Django :: 2.0",
    "Intended Audience :: Developers",
    "Natural Language :: English",
    "License :: OSI Approved :: BSD License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: Implementation :: CPython",
    "Topic :: Software Development",
]

KEYWORDS = (
    "cookiecutter, Python, projects, project templates, django, "
    "skeleton, scaffolding, project directory, setup.py"
)

setup(
    name="cookiecutter-django",
    version=version,
    description="A Cookiecutter template for creating production-ready Django projects quickly.",
    long_description=long_description,
    author="Daniel Roy Greenfeld",
    author_email="pydanny@gmail.com",
    url="https://github.com/pydanny/cookiecutter-django",
    packages=[],
    license="BSD",
    zip_safe=False,
    classifiers=CLASSIFIERS,
    keywords=KEYWORDS,
)
| 30.673077 | 97 | 0.648903 |
9b42ff6b2b77aef95935952e89b7203008699d60 | 6,761 | py | Python | mask_the_face.py | shhommychon/WrongMaskTheFace | 9950988e6fa2ec395af8c2ef0682d47139402181 | [
"MIT"
] | null | null | null | mask_the_face.py | shhommychon/WrongMaskTheFace | 9950988e6fa2ec395af8c2ef0682d47139402181 | [
"MIT"
] | null | null | null | mask_the_face.py | shhommychon/WrongMaskTheFace | 9950988e6fa2ec395af8c2ef0682d47139402181 | [
"MIT"
] | null | null | null | # Author: aqeelanwar
# Created: 27 April,2020, 10:22 PM
# Email: aqeel.anwar@gatech.edu
import argparse
import dlib
from utils.aux_functions import *
# Command-line input setup.
# Every option ends up on `args`, which is passed around as the single
# configuration object for the masking pipeline below.
parser = argparse.ArgumentParser(
    description="MaskTheFace - Python code to mask faces dataset"
)
parser.add_argument(
    "--path",
    type=str,
    default="",
    help="Path to either the folder containing images or the image itself",
)
parser.add_argument(
    "--mask_type",
    type=str,
    default="surgical",
    choices=["surgical", "N95", "KN95", "cloth", "gas", "inpaint", "random", "all"],
    help="Type of the mask to be applied. Available options: all, surgical_blue, surgical_green, N95, cloth",
)
parser.add_argument(
    "--pattern",
    type=str,
    default="",
    help="Type of the pattern. Available options in masks/textures",
)
parser.add_argument(
    "--pattern_weight",
    type=float,
    default=0.5,
    help="Weight of the pattern. Must be between 0 and 1",
)
parser.add_argument(
    "--color",
    type=str,
    default="#0473e2",
    help="Hex color value that need to be overlayed to the mask",
)
parser.add_argument(
    "--color_weight",
    type=float,
    default=0.5,
    help="Weight of the color intensity. Must be between 0 and 1",
)
parser.add_argument(
    "--wear_type",
    type=str,
    default="normal",
    choices=["normal", "chin_mask", "nose_mask", "eye_mask"],
    help="Type of masking to be applied. Available options: normal, chin_mask, nose_mask, eye_mask",
)
# --code encodes a comma-separated list of mask specs ("type-variation");
# it is decoded further below into args.mask_dict_of_dict.
parser.add_argument(
    "--code",
    type=str,
    # default="cloth-masks/textures/check/check_4.jpg, cloth-#e54294, cloth-#ff0000, cloth, cloth-masks/textures/others/heart_1.png, cloth-masks/textures/fruits/pineapple.png, N95, surgical_blue, surgical_green",
    default="",
    help="Generate specific formats",
)
parser.add_argument(
    "--verbose", dest="verbose", action="store_true", help="Turn verbosity on"
)
parser.add_argument(
    "--write_original_image",
    dest="write_original_image",
    action="store_true",
    help="If true, original image is also stored in the masked folder",
)
# NOTE(review): 'feature' is not read anywhere in this script -- looks
# vestigial; confirm before removing.
parser.set_defaults(feature=False)
args = parser.parse_args()
# Masked output mirrors the input path with a "_masked" suffix.
args.write_path = args.path + "_masked"
# Set up dlib face detector and predictor.
# The 68-point landmark model is downloaded on first run if it is missing.
args.detector = dlib.get_frontal_face_detector()
path_to_dlib_model = "dlib_models/shape_predictor_68_face_landmarks.dat"
if not os.path.exists(path_to_dlib_model):
    download_dlib_model()
args.predictor = dlib.shape_predictor(path_to_dlib_model)
# Decode the --code option: a comma-separated list of entries of the form
# "<mask_type>[-<variation>]", where a variation starting with '#' is a hex
# color and anything else is a texture path.
entries = "".join(args.code.split()).split(",")
args.code_count = np.zeros(len(entries))
args.mask_dict_of_dict = {}
for index, token in enumerate(entries):
    pieces = token.split("-")
    spec = {"type": pieces[0], "color": "", "texture": ""}
    if len(pieces) == 2:
        variation = pieces[1]
        if "#" in variation:
            spec["color"] = variation
        else:
            spec["texture"] = variation
    args.mask_dict_of_dict[index] = spec
# Check if path is file or directory or none, then run the masking pipeline:
# for a directory, mask every image in it plus every image in its immediate
# subdirectories; for a single file, mask just that file.
is_directory, is_file, is_other = check_path(args.path)
display_MaskTheFace()
if is_directory:
    path, dirs, files = os.walk(args.path).__next__()
    # NOTE(review): file_count and dirs_count are never used below.
    file_count = len(files)
    dirs_count = len(dirs)
    if len(files) > 0:
        print_orderly("Masking image files", 60)
    # Process files in the directory if any
    for f in tqdm(files):
        image_path = path + "/" + f
        write_path = path + "_masked"
        if not os.path.isdir(write_path):
            os.makedirs(write_path)
        if is_image(image_path):
            # Proceed if file is image
            if args.verbose:
                str_p = "Processing: " + image_path
                tqdm.write(str_p)
            split_path = f.rsplit(".")
            # mask_image returns one output per requested mask variant.
            masked_image, mask, mask_binary_array, original_image = mask_image(
                image_path, args
            )
            for i in range(len(mask)):
                # Output name: <stem>_<mask name>.<ext>
                w_path = (
                    write_path
                    + "/"
                    + split_path[0]
                    + "_"
                    + mask[i]
                    + "."
                    + split_path[1]
                )
                img = masked_image[i]
                cv2.imwrite(w_path, img)
    print_orderly("Masking image directories", 60)
    # Process directories within the path provided (one level deep only).
    for d in tqdm(dirs):
        dir_path = args.path + "/" + d
        dir_write_path = args.write_path + "/" + d
        if not os.path.isdir(dir_write_path):
            os.makedirs(dir_write_path)
        _, _, files = os.walk(dir_path).__next__()
        # Process each files within subdirectory
        for f in files:
            image_path = dir_path + "/" + f
            if args.verbose:
                str_p = "Processing: " + image_path
                tqdm.write(str_p)
            write_path = dir_write_path
            if is_image(image_path):
                # Proceed if file is image
                split_path = f.rsplit(".")
                # NOTE(review): third return is named mask_binary here but
                # mask_binary_array elsewhere; both are unused.
                masked_image, mask, mask_binary, original_image = mask_image(
                    image_path, args
                )
                for i in range(len(mask)):
                    w_path = (
                        write_path
                        + "/"
                        + split_path[0]
                        + "_"
                        + mask[i]
                        + "."
                        + split_path[1]
                    )
                    w_path_original = write_path + "/" + f
                    img = masked_image[i]
                    # Write the masked image
                    cv2.imwrite(w_path, img)
                    if args.write_original_image:
                        # Write the original image
                        cv2.imwrite(w_path_original, original_image)
    if args.verbose:
        print(args.code_count)
# Process if the path was a file
elif is_file:
    print("Masking image file")
    image_path = args.path
    write_path = args.path.rsplit(".")[0]
    if is_image(image_path):
        # Proceed if file is image
        # masked_images, mask, mask_binary_array, original_image
        masked_image, mask, mask_binary_array, original_image = mask_image(
            image_path, args
        )
        for i in range(len(mask)):
            w_path = write_path + "_" + mask[i] + "." + args.path.rsplit(".")[1]
            img = masked_image[i]
            cv2.imwrite(w_path, img)
else:
    print("Path is neither a valid file or a valid directory")
print("Processing Done")
| 30.318386 | 212 | 0.585121 |
67359e9688fb15bcfc59fb7ed473f5a38b6947ab | 3,779 | py | Python | cryspy/C_item_loop_classes/cl_1_setup.py | ikibalin/rhochi | 1ca03f18dc72006322a101ed877cdbba33ed61e7 | [
"MIT"
] | null | null | null | cryspy/C_item_loop_classes/cl_1_setup.py | ikibalin/rhochi | 1ca03f18dc72006322a101ed877cdbba33ed61e7 | [
"MIT"
] | null | null | null | cryspy/C_item_loop_classes/cl_1_setup.py | ikibalin/rhochi | 1ca03f18dc72006322a101ed877cdbba33ed61e7 | [
"MIT"
] | null | null | null | """Setup and SetupL classes."""
from typing import NoReturn
from cryspy.A_functions_base.function_1_objects import \
form_items_by_dictionary
from cryspy.B_parent_classes.cl_1_item import ItemN
from cryspy.B_parent_classes.cl_2_loop import LoopN
class Setup(ItemN):
    """Experimental diffraction setup (constant wavelength).

    Attributes
    ----------
    - wavelength (mandatory) (in Angstrems)
    - field (optional) (in Tesla)
    - radiation (optional) (neutrons by default, or X-rays)
    - offset_ttheta (optional for powder 1d and 2d) (in degrees)
    - offset_phi (optional for powder 2d) (in degrees)
    - ratio_lambdaover2 (optional, for single diffraction)
    - k (0. for neutrons, 0.5 for characteristic X-ray, 0.1 for synchrotron radiation)
    - cthm (cos**2 (2 theta_M)) (for calculation of Lorentrz polarization factor)
    """
    # No mandatory attributes; everything below is optional.
    ATTR_MANDATORY_NAMES = ()
    ATTR_MANDATORY_TYPES = ()
    ATTR_MANDATORY_CIF = ()
    # Python attribute names, their types, and the matching CIF keys
    # (the three tuples are parallel, element by element).
    ATTR_OPTIONAL_NAMES = ("wavelength", "field", "offset_ttheta", "offset_phi", "offset_gamma", "offset_nu",
                           "ratio_lambdaover2", "radiation", "k", "cthm")
    ATTR_OPTIONAL_TYPES = (float, float, float, float, float, float, float, str, float, float)
    ATTR_OPTIONAL_CIF = ("wavelength", "field", "offset_2theta", "offset_phi", "offset_gamma", "offset_nu",
                         "ratio_lambda/2", "radiation", "K", "cthm")
    ATTR_NAMES = ATTR_MANDATORY_NAMES + ATTR_OPTIONAL_NAMES
    ATTR_TYPES = ATTR_MANDATORY_TYPES + ATTR_OPTIONAL_TYPES
    ATTR_CIF = ATTR_MANDATORY_CIF + ATTR_OPTIONAL_CIF
    ATTR_INT_NAMES = ()
    ATTR_INT_PROTECTED_NAMES = ()
    # parameters considered are refined parameters
    ATTR_REF = ("wavelength", "offset_ttheta", "offset_phi", "offset_gamma", "offset_nu",
                "ratio_lambdaover2")
    # Derived per-parameter bookkeeping names (sigma, constraint and
    # refinement flags, constraint marks) for each refinable parameter.
    ATTR_SIGMA = tuple([f"{_h:}_sigma" for _h in ATTR_REF])
    ATTR_CONSTR_FLAG = tuple([f"{_h:}_constraint" for _h in ATTR_REF])
    ATTR_REF_FLAG = tuple([f"{_h:}_refinement" for _h in ATTR_REF])
    ATTR_CONSTR_MARK = tuple([f"{_h:}_mark" for _h in ATTR_REF])
    # formats if cif format
    D_FORMATS = {'wavelength': "{:.4f}", 'field': "{:.2f}",
                 'offset_ttheta': "{:.3f}", 'offset_phi': "{:.3f}",
                 'offset_gamma': "{:.3f}", 'offset_nu': "{:.3f}",
                 "ratio_lambdaover2": "{:.3f}", "k": "{:.1f}", "cthm": "{:.5f}"}
    # constraints on the parameters
    D_CONSTRAINTS = {"radiation": ["neutrons", "X-rays"]}
    # default values for the parameters
    # NOTE(review): the key "offset_2theta" matches the CIF name but not the
    # attribute name "offset_ttheta" used in ATTR_OPTIONAL_NAMES/D_FORMATS.
    # Verify that the setattr loop in __init__ resolves CIF-style keys;
    # otherwise this default is applied to an undeclared attribute.
    D_DEFAULT = {"offset_2theta": 0., "radiation": "neutrons", "k":0., "cthm": 0.91}
    for key in ATTR_SIGMA:
        D_DEFAULT[key] = 0.
    for key in (ATTR_CONSTR_FLAG + ATTR_REF_FLAG):
        D_DEFAULT[key] = False
    for key in ATTR_CONSTR_MARK:
        D_DEFAULT[key] = ""
    PREFIX = "setup"

    def __init__(self, **kwargs) -> NoReturn:
        super(Setup, self).__init__()
        # defined for any integer and float parameters
        D_MIN = {"wavelength": 0., "ratio_lambdaover2": 0.}
        # defined for ani integer and float parameters
        D_MAX = {"ratio_lambdaover2": 1.}
        self.__dict__["D_MIN"] = D_MIN
        self.__dict__["D_MAX"] = D_MAX
        # Apply defaults first, then caller-supplied values override them.
        for key, attr in self.D_DEFAULT.items():
            setattr(self, key, attr)
        for key, attr in kwargs.items():
            setattr(self, key, attr)
class SetupL(LoopN):
    """Loop container of `Setup` items.

    Experimental diffraction setup (constant wavelength).
    """
    ITEM_CLASS = Setup
    ATTR_INDEX = None

    def __init__(self, loop_name: str = None, **kwargs) -> NoReturn:
        super(SetupL, self).__init__()
        self.__dict__["loop_name"] = loop_name
        self.__dict__["items"] = form_items_by_dictionary(self.ITEM_CLASS, kwargs)
| 38.171717 | 109 | 0.635089 |
e60034ea7b46e83b94613176e159bbee3cf0dcad | 348 | py | Python | Algorithms/Sorting/insertionsort1.py | ekant1999/HackerRank | 084d4550b4eaf130837ab26a4efdbcaf8b667cdc | [
"MIT"
] | 9 | 2017-03-19T16:27:31.000Z | 2022-02-17T11:42:21.000Z | Algorithms/Sorting/insertionsort1.py | ekant1999/HackerRank | 084d4550b4eaf130837ab26a4efdbcaf8b667cdc | [
"MIT"
] | null | null | null | Algorithms/Sorting/insertionsort1.py | ekant1999/HackerRank | 084d4550b4eaf130837ab26a4efdbcaf8b667cdc | [
"MIT"
] | 6 | 2019-02-18T11:26:24.000Z | 2022-03-21T14:13:15.000Z | #!/bin/python
def insertionSort(ar):
    """Insert the last element of `ar` into the sorted prefix, in place.

    Prints the whole list after every shift and once more after the final
    insertion (HackerRank "Insertion Sort - Part 1" output format).

    Fixes vs. the original:
    - The bounds check now precedes the comparison: the old condition
      `ar[pos] > e and pos >= 0` evaluated `ar[-1]` (Python silently wraps
      to the last element) once pos walked off the front of the list.
    - The length is taken from `ar` itself instead of the global `m`, so the
      function works standalone; callers that set `m` are unaffected.
    """
    e = ar[len(ar) - 1]      # element to insert
    pos = len(ar) - 2        # last index of the sorted prefix
    while pos >= 0 and ar[pos] > e:
        ar[pos + 1] = ar[pos]    # shift the larger element one slot right
        pos -= 1
        print(" ".join(str(ch) for ch in ar))
    ar[pos + 1] = e
    print(" ".join(str(ch) for ch in ar))
# Script entry (Python 2): first line is the list length, second line the
# space-separated integer values.
m = input()
ar = [int(i) for i in raw_input().strip().split()]
insertionSort(ar)
| 23.2 | 51 | 0.502874 |
ac90be96392a057ed93b9f175dd35acbfad3b716 | 5,726 | py | Python | example_tagging.py | Yizong98/Modified_Transfer_Model | 7ad226d6515f1c6ea6f679d6cf3cbcc066b30236 | [
"MIT"
] | null | null | null | example_tagging.py | Yizong98/Modified_Transfer_Model | 7ad226d6515f1c6ea6f679d6cf3cbcc066b30236 | [
"MIT"
] | null | null | null | example_tagging.py | Yizong98/Modified_Transfer_Model | 7ad226d6515f1c6ea6f679d6cf3cbcc066b30236 | [
"MIT"
] | null | null | null | from keras.layers import Input, Dense
from keras.models import Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ELU
from keras.utils.data_utils import get_file
from keras.layers import Input, Dense
import time
import numpy as np
from keras import backend as K
import audio_processor as ap
import pdb
##
def sort_result(tags, preds):
    """Pair each tag with its score and return pairs sorted by score, descending.

    Scores are rendered as fixed-precision strings ('%5.3f').
    """
    ranked = sorted(zip(tags, preds), key=lambda pair: pair[1], reverse=True)
    return [(tag, '%5.3f' % score) for tag, score in ranked]
def librosa_exists():
    """Return True when the optional librosa dependency is importable."""
    try:
        __import__('librosa')
    except ImportError:
        return False
    return True
# Demo audio tracks and their precomputed mel-spectrograms (parallel lists:
# melgram_paths[i] is the cached spectrogram of audio_paths[i]).
audio_paths = ['data/bensound-cute.mp3',
               'data/bensound-actionable.mp3',
               'data/bensound-dubstep.mp3',
               'data/bensound-thejazzpiano.mp3']
melgram_paths = ['data/bensound-cute.npy',
                 'data/bensound-actionable.npy',
                 'data/bensound-dubstep.npy',
                 'data/bensound-thejazzpiano.npy']
# The 50 tag labels, in the same order as the model's output units.
tags = ['rock', 'pop', 'alternative', 'indie', 'electronic',
        'female vocalists', 'dance', '00s', 'alternative rock', 'jazz',
        'beautiful', 'metal', 'chillout', 'male vocalists',
        'classic rock', 'soul', 'indie rock', 'Mellow', 'electronica',
        '80s', 'folk', '90s', 'chill', 'instrumental', 'punk',
        'oldies', 'blues', 'hard rock', 'ambient', 'acoustic',
        'experimental', 'female vocalist', 'guitar', 'Hip-Hop',
        '70s', 'party', 'country', 'easy listening',
        'sexy', 'catchy', 'funk', 'electro', 'heavy metal',
        'Progressive rock', '60s', 'rnb', 'indie pop',
        'sad', 'House', 'happy']
# Prepare the input batch: stack one mel-spectrogram per track into an
# array of shape (n_tracks, 1, 96, 1366).
melgrams = np.zeros((0, 1, 96, 1366))
# BUG FIX: the original wrote `if librosa_exists:` -- that tests the function
# object itself, which is always truthy, so the precomputed-.npy fallback
# branch was unreachable. The function must be called.
if librosa_exists():
    # Compute mel-spectrograms directly from the audio files.
    for audio_path in audio_paths:
        melgram = ap.compute_melgram(audio_path)
        melgrams = np.concatenate((melgrams, melgram), axis=0)
else:
    # librosa unavailable: load the cached spectrograms instead.
    for melgram_path in melgram_paths:
        melgram = np.load(melgram_path)
        melgrams = np.concatenate((melgrams, melgram), axis=0)
# URL of the pretrained Theano-ordering weights (not downloaded here;
# the load_weights call below is commented out).
TH_WEIGHTS_PATH = 'https://github.com/keunwoochoi/music-auto_tagging-keras/blob/master/data/music_tagger_cnn_weights_theano.h5'
weights='msd'
input_tensor=None
include_top=True
if weights not in {'msd', None}:
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization) or `msd` '
                     '(pre-training on Million Song Dataset).')
# Input shape depends on the backend's image dimension ordering:
# Theano ('th') is channels-first, TensorFlow is channels-last.
if K.image_dim_ordering() == 'th':
    input_shape = (1, 96, 1366)
else:
    input_shape = (96, 1366, 1)
if input_tensor is None:
    melgram_input = Input(shape=input_shape)
else:
    if not K.is_keras_tensor(input_tensor):
        melgram_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        melgram_input = input_tensor
# Axis indices for normalization/pooling, again per dim ordering.
if K.image_dim_ordering() == 'th':
    channel_axis = 1
    freq_axis = 2
    time_axis = 3
else:
    channel_axis = 3
    freq_axis = 1
    time_axis = 2
# Five conv blocks (Conv -> BatchNorm -> ELU -> MaxPool), then a sigmoid
# layer with one output per tag.
x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(melgram_input)
x = Convolution2D(32, 3, 3, border_mode='same', name='conv1')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn1')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 4), name='pool1')(x)
x = Convolution2D(64, 3, 3, border_mode='same', name='conv2')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 4), name='pool2')(x)
x = Convolution2D(64, 3, 3, border_mode='same', name='conv3')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 4), name='pool3')(x)
x = Convolution2D(64, 3, 3, border_mode='same', name='conv4')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn4')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(3, 5), name='pool4')(x)
x = Convolution2D(32, 3, 3, border_mode='same', name='conv5')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn5')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(4, 4), name='pool5')(x)
x = Flatten()(x)
if include_top:
    x = Dense(50, activation='sigmoid', name='output')(x)
model = Model(melgram_input, x)
print (model)
# NOTE(review): weight loading is disabled, so predictions below come from
# randomly initialized weights -- confirm whether that is intended.
# if weights is None:
#     return model
# else:
# Load input
# if K.image_dim_ordering() == 'tf':
#     raise RuntimeError("Please set image_dim_ordering == 'th'."
#                        "You can set it at ~/.keras/keras.json")
# model.load_weights('data/music_tagger_cnn_weights_%s.h5' % K._BACKEND,
#                    by_name=True)
# predict the tags like this
print('Predicting...')
start = time.time()
pred_tags = model.predict(melgrams)
# print like this...
# print "Prediction is done. It took %d seconds." % (time.time()-start)
print('Printing top-10 tags for each track...')
for song_idx, audio_path in enumerate(audio_paths):
    sorted_result = sort_result(tags, pred_tags[song_idx, :].tolist())
    print(audio_path)
    print(sorted_result[:5])
    print(sorted_result[5:10])
    print(' ')
53ba1c8e15d6d8a0f64542290ac645dd5080e8ef | 2,418 | py | Python | static/brythonlib/cs1media/__init__.py | pythonpad/vue-pythonpad-runner | 52decba9607b3b7b050ee0bf6dd4ef07ae644587 | [
"MIT"
] | 3 | 2021-01-26T16:18:45.000Z | 2021-09-15T00:57:12.000Z | static/brythonlib/cs1media/__init__.py | pythonpad/vue-pythonpad-runner | 52decba9607b3b7b050ee0bf6dd4ef07ae644587 | [
"MIT"
] | null | null | null | static/brythonlib/cs1media/__init__.py | pythonpad/vue-pythonpad-runner | 52decba9607b3b7b050ee0bf6dd4ef07ae644587 | [
"MIT"
] | 2 | 2021-01-26T16:18:47.000Z | 2021-10-21T20:45:20.000Z | import browser
from .picture import Picture
def create_picture(width, height, color=(0, 0, 0)):
    """Create a new Picture and register it in the module-level registry.

    When a creation lock is active (set via lock_picture(..., lock_create=True)),
    the locked picture is returned instead of creating a new one.
    """
    global __media__
    try:
        if __media__.get('lock_create') and 'locked_picture' in __media__:
            return __media__['locked_picture']
    except NameError:
        # First call in this interpreter: initialize the registry.
        __media__ = {}
    if width < 0 or height < 0:
        raise ValueError('Invalid image dimensions: %s, %s' % (width, height))
    picture = Picture(width, height, color=color)
    __media__.setdefault('pictures', []).append(picture)
    return picture
def load_picture(filename=None):
    """Load a Picture from the pad's pre-extracted virtual file data.

    Returns the locked picture instead when a lock is active.
    """
    global __media__
    try:
        if 'locked_picture' in __media__:
            return __media__['locked_picture']
    except NameError:
        # First call in this interpreter: initialize the registry.
        __media__ = {}
    if filename is None:
        raise NotImplementedError('Pythonpad\'s cs1media does not support dynamic image file loading.')
    if not browser.self.isFileExist(filename):
        raise FileNotFoundError('No such file: \'%s\'' % filename)
    meta = browser.self.getFileDict(filename)
    if 'imageData' not in meta:
        raise ValueError('Pre-extracted image data is not found. Be aware that cs1media in Pythonpad only supports loading an image file that already existed in pad\'s virtual file structure when the code is executed, only when cs1media is directly imported in main.py.')
    picture = Picture(
        meta['width'], meta['height'], data=meta['imageData'])
    __media__.setdefault('pictures', []).append(picture)
    return picture
def lock_picture(picture, lock_create=False):
    """Pin `picture` so load_picture() (and, with lock_create, create_picture())
    return it instead of their normal result."""
    global __media__
    try:
        __media__
    except NameError:
        # First call in this interpreter: initialize the registry.
        __media__ = {}
    __media__['locked_picture'] = picture
    if lock_create:
        __media__['lock_create'] = True
def unlock_picture():
    """Remove the active picture lock, if any."""
    global __media__
    try:
        del __media__['locked_picture']
        del __media__['lock_create']
    except:
        # NOTE(review): if 'locked_picture' is absent, the KeyError fires
        # before 'lock_create' is deleted, so a stale 'lock_create' flag can
        # survive -- confirm this is intended. The bare except also swallows
        # NameError when __media__ was never created.
        pass
def get_all_pictures():
    """Return the registry's picture list (empty when nothing was created)."""
    try:
        return __media__.get('pictures', [])
    except NameError:
        # Registry never initialized in this interpreter.
        return []
# Public names exported by `from cs1media import *`.
__all__ = [
    'create_picture',
    'load_picture',
    'lock_picture',
    'unlock_picture',
    'get_all_pictures',
]
| 29.13253 | 271 | 0.654673 |
c4bfb44986929722bcebc3ca70ba5158f1ede8ba | 10,531 | py | Python | sdks/python/apache_beam/internal/pickler.py | rehmanmuradali/beam | de8ff705145cbbc41bea7750a0a5d3553924ab3a | [
"Apache-2.0"
] | 2 | 2017-12-19T18:34:54.000Z | 2019-05-14T21:50:06.000Z | sdks/python/apache_beam/internal/pickler.py | almamuncsit/beam | aa58e1e5db4af2a6f97520756831e87aa1d3e3fb | [
"Apache-2.0"
] | 9 | 2020-06-03T12:34:25.000Z | 2020-08-11T12:18:22.000Z | sdks/python/apache_beam/internal/pickler.py | almamuncsit/beam | aa58e1e5db4af2a6f97520756831e87aa1d3e3fb | [
"Apache-2.0"
] | 1 | 2020-11-11T18:45:54.000Z | 2020-11-11T18:45:54.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pickler for values, functions, and classes.
For internal use only. No backwards compatibility guarantees.
Pickles created by the pickling library contain non-ASCII characters, so
we base64-encode the results so that we can put them in a JSON objects.
The pickler is used to embed FlatMap callable objects into the workflow JSON
description.
The pickler module should be used to pickle functions and modules; for values,
the coders.*PickleCoder classes should be used instead.
"""
# pytype: skip-file
from __future__ import absolute_import
import base64
import bz2
import logging
import sys
import threading
import traceback
import types
from typing import Any
from typing import Dict
from typing import Tuple
import dill
class _NoOpContextManager(object):
def __enter__(self):
pass
def __exit__(self, *unused_exc_info):
pass
# Choose the lock guarding all pickling below: a real reentrant lock on
# Python 3, a no-op stand-in on Python 2.
if sys.version_info[0] > 2:
  # Pickling, especially unpickling, causes broken module imports on Python 3
  # if executed concurrently, see: BEAM-8651, http://bugs.python.org/issue38884.
  _pickle_lock_unless_py2 = threading.RLock()
else:
  # Avoid slow reentrant locks on Py2. See: https://bugs.python.org/issue3001.
  _pickle_lock_unless_py2 = _NoOpContextManager()
# Dill 0.28.0 renamed dill.dill to dill._dill:
# https://github.com/uqfoundation/dill/commit/f0972ecc7a41d0b8acada6042d557068cac69baa
# TODO: Remove this once Beam depends on dill >= 0.2.8
# The two guards below make both spellings (dill.dill and dill._dill)
# available regardless of the installed dill version.
if not getattr(dill, 'dill', None):
  dill.dill = dill._dill
  sys.modules['dill.dill'] = dill._dill
# TODO: Remove once Dataflow has containers with a preinstalled dill >= 0.2.8
if not getattr(dill, '_dill', None):
  dill._dill = dill.dill
  sys.modules['dill._dill'] = dill.dill
def _is_nested_class(cls):
"""Returns true if argument is a class object that appears to be nested."""
return (
isinstance(cls, type) and cls.__module__ is not None and
cls.__module__ != 'builtins' # Python 3
and cls.__module__ != '__builtin__' # Python 2
and cls.__name__ not in sys.modules[cls.__module__].__dict__)
def _find_containing_class(nested_class):
"""Finds containing class of a nested class passed as argument."""
seen = set()
def _find_containing_class_inner(outer):
if outer in seen:
return None
seen.add(outer)
for k, v in outer.__dict__.items():
if v is nested_class:
return outer, k
elif isinstance(v, type) and hasattr(v, '__dict__'):
res = _find_containing_class_inner(v)
if res: return res
return _find_containing_class_inner(sys.modules[nested_class.__module__])
def _nested_type_wrapper(fun):
  """A wrapper for the standard pickler handler for class objects.

  Args:
    fun: Original pickler handler for type objects.

  Returns:
    A wrapper for type objects that handles nested classes.

  The wrapper detects if an object being pickled is a nested class object.
  For nested class object only it will save the containing class object so
  the nested structure is recreated during unpickle.
  """
  def wrapper(pickler, obj):
    # When the nested class is defined in the __main__ module we do not have to
    # do anything special because the pickler itself will save the constituent
    # parts of the type (i.e., name, base classes, dictionary) and then
    # recreate it during unpickling.
    if _is_nested_class(obj) and obj.__module__ != '__main__':
      containing_class_and_name = _find_containing_class(obj)
      if containing_class_and_name is not None:
        # Pickle the nested class as getattr(container, name) so unpickling
        # re-resolves it through its container instead of rebuilding it.
        return pickler.save_reduce(getattr, containing_class_and_name, obj=obj)
    try:
      # Not nested (or container not found): defer to the original handler.
      return fun(pickler, obj)
    except dill.dill.PicklingError:
      # Last resort: rebuild the class from its constituent parts
      # (metaclass, name, bases, namespace dict).
      # pylint: disable=protected-access
      return pickler.save_reduce(
          dill.dill._create_type,
          (
              type(obj),
              obj.__name__,
              obj.__bases__,
              dill.dill._dict_from_dictproxy(obj.__dict__)),
          obj=obj)
      # pylint: enable=protected-access

  return wrapper
# Monkey patch the standard pickler dispatch table entry for type objects.
# Dill, for certain types, defers to the standard pickler (including type
# objects). We wrap the standard handler using type_wrapper() because
# for nested class we want to pickle the actual enclosing class object so we
# can recreate it during unpickling.
# TODO(silviuc): Make sure we submit the fix upstream to GitHub dill project.
# Note: this mutation is process-wide and affects every Pickler instance.
dill.dill.Pickler.dispatch[type] = _nested_type_wrapper(
    dill.dill.Pickler.dispatch[type])

# Dill pickles generators objects without complaint, but unpickling produces
# TypeError: object.__new__(generator) is not safe, use generator.__new__()
# on some versions of Python.
def _reject_generators(unused_pickler, unused_obj):
  raise TypeError("can't (safely) pickle generator objects")

# Fail fast at pickle time rather than at unpickle time.
dill.dill.Pickler.dispatch[types.GeneratorType] = _reject_generators
# This if guards against dill not being full initialized when generating docs.
if 'save_module' in dir(dill.dill):

  # Always pickle non-main modules by name.
  old_save_module = dill.dill.save_module

  @dill.dill.register(dill.dill.ModuleType)
  def save_module(pickler, obj):
    # Only the __main__ module is pickled by value (dill's default);
    # every other module is reduced to a by-name re-import.
    if dill.dill.is_dill(pickler) and obj is pickler._main:
      return old_save_module(pickler, obj)
    else:
      dill.dill.log.info('M2: %s' % obj)
      # pylint: disable=protected-access
      pickler.save_reduce(dill.dill._import_module, (obj.__name__, ), obj=obj)
      # pylint: enable=protected-access
      dill.dill.log.info('# M2')
  # Pickle module dictionaries (commonly found in lambda's globals)
  # by referencing their module.
  old_save_module_dict = dill.dill.save_module_dict
  # Cache mapping id(module.__dict__) -> (module, module.__dict__), built
  # lazily on first use and refreshed when a dict looks like a module dict.
  known_module_dicts = {
  }  # type: Dict[int, Tuple[types.ModuleType, Dict[str, Any]]]

  @dill.dill.register(dict)
  def new_save_module_dict(pickler, obj):
    obj_id = id(obj)
    # '__file__'/'__package__' keys are a heuristic for "this dict is a
    # module's namespace"; also (re)build the cache on its first use.
    if not known_module_dicts or '__file__' in obj or '__package__' in obj:
      if obj_id not in known_module_dicts:
        # Trigger loading of lazily loaded modules (such as pytest vendored
        # modules).
        # This pass over sys.modules needs to iterate on a copy of sys.modules
        # since lazy loading modifies the dictionary, hence the use of list().
        for m in list(sys.modules.values()):
          try:
            _ = m.__dict__
          except AttributeError:
            pass

        for m in list(sys.modules.values()):
          try:
            if (m and m.__name__ != '__main__' and
                isinstance(m, dill.dill.ModuleType)):
              d = m.__dict__
              known_module_dicts[id(d)] = m, d
          except AttributeError:
            # Skip modules that do not have the __name__ attribute.
            pass
    if obj_id in known_module_dicts and dill.dill.is_dill(pickler):
      m = known_module_dicts[obj_id][0]
      try:
        # The module must be importable on the unpickling side for the
        # by-reference reduction to be valid; verify that here.
        # pylint: disable=protected-access
        dill.dill._import_module(m.__name__)
        return pickler.save_reduce(
            getattr, (known_module_dicts[obj_id][0], '__dict__'), obj=obj)
      except (ImportError, AttributeError):
        # Not importable by name: fall back to pickling the dict by value.
        return old_save_module_dict(pickler, obj)
    else:
      return old_save_module_dict(pickler, obj)

  dill.dill.save_module_dict = new_save_module_dict
def _nest_dill_logging():
  """Prefix all dill logging with its depth in the callstack.

  Useful for debugging pickling of deeply nested structures.
  """
  original_info = dill.dill.log.info

  def _depth_prefixed_info(msg, *args, **kwargs):
    # Two characters per stack frame, truncated to the template length.
    template = '1 2 3 4 5 6 7 8 9 0 ' * 10
    depth = len(traceback.extract_stack())
    original_info(template[:depth] + msg, *args, **kwargs)

  dill.dill.log.info = _depth_prefixed_info
# Turn off verbose logging from the dill pickler: only WARN and above from
# the 'dill' logger are shown.
logging.getLogger('dill').setLevel(logging.WARN)
def dumps(o, enable_trace=True):
  # type: (...) -> bytes
  """For internal use only; no backwards-compatibility guarantees."""
  with _pickle_lock_unless_py2:
    try:
      try:
        pickled = dill.dumps(o)
      except Exception:  # pylint: disable=broad-except
        if not enable_trace:
          raise
        # Retry with dill tracing enabled so the failure is diagnosable.
        dill.dill._trace(True)  # pylint: disable=protected-access
        pickled = dill.dumps(o)
    finally:
      dill.dill._trace(False)  # pylint: disable=protected-access
  # Compress as compactly as possible (compresslevel=9) to decrease peak
  # memory usage (of multiple in-memory copies) and to avoid hitting protocol
  # buffer limits.
  compressed = bz2.compress(pickled, compresslevel=9)
  del pickled  # Free up some possibly large and no-longer-needed memory.
  return base64.b64encode(compressed)
def loads(encoded, enable_trace=True):
  """For internal use only; no backwards-compatibility guarantees."""
  decompressed = bz2.decompress(base64.b64decode(encoded))
  with _pickle_lock_unless_py2:
    try:
      try:
        return dill.loads(decompressed)
      except Exception:  # pylint: disable=broad-except
        if not enable_trace:
          raise
        # Retry with dill tracing enabled so the failure is diagnosable.
        dill.dill._trace(True)  # pylint: disable=protected-access
        return dill.loads(decompressed)
    finally:
      dill.dill._trace(False)  # pylint: disable=protected-access
def dump_session(file_path):
  """For internal use only; no backwards-compatibility guarantees.

  Pickle the current python session to be used in the worker.

  Note: Due to the inconsistency in the first dump of dill dump_session we
  create and load the dump twice to have consistent results in the worker and
  the running session. Check: https://github.com/uqfoundation/dill/issues/195

  Args:
    file_path: Path of the file the session state is written to.
  """
  with _pickle_lock_unless_py2:
    # Dump, reload, then dump again so both this process and the worker end
    # up with the state produced by the second (stable) dump.
    dill.dump_session(file_path)
    dill.load_session(file_path)
    return dill.dump_session(file_path)
def load_session(file_path):
  """For internal use only; restore a python session written by dump_session."""
  with _pickle_lock_unless_py2:
    return dill.load_session(file_path)
| 34.191558 | 86 | 0.712658 |
c73fb17b2cecf6385d73714f70a0ecfff73c9543 | 2,243 | py | Python | launch/ign_moveit2_headless.launch.py | Tiamat-Tech/drl_grasping | e67efee1cdbeeb3cb1e4d028890bbfc601e7840c | [
"BSD-3-Clause"
] | 126 | 2020-11-02T11:08:07.000Z | 2022-03-31T16:25:06.000Z | launch/ign_moveit2_headless.launch.py | Tiamat-Tech/drl_grasping | e67efee1cdbeeb3cb1e4d028890bbfc601e7840c | [
"BSD-3-Clause"
] | 68 | 2020-11-02T13:18:29.000Z | 2022-02-27T17:38:50.000Z | launch/ign_moveit2_headless.launch.py | Tiamat-Tech/drl_grasping | e67efee1cdbeeb3cb1e4d028890bbfc601e7840c | [
"BSD-3-Clause"
] | 27 | 2021-01-20T16:15:41.000Z | 2022-03-15T10:44:43.000Z | """Forwarded launch of ign_moveit2 (with RViz2 disabled by default)"""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch_ros.actions import Node
from launch.substitutions import LaunchConfiguration
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
def generate_launch_description():
    """Build the launch description: declared arguments, the included
    ign_moveit2 launch file, and a ROS2 -> Ignition JointTrajectory bridge
    for the gripper."""
    # Launch Arguments
    use_sim_time = LaunchConfiguration('use_sim_time', default=True)
    config_rviz2 = LaunchConfiguration('config_rviz2', default="")
    log_level = LaunchConfiguration('log_level', default='fatal')

    return LaunchDescription([
        # Launch Arguments
        DeclareLaunchArgument(
            'use_sim_time',
            default_value=use_sim_time,
            description="If true, use simulated clock"),
        DeclareLaunchArgument(
            'config_rviz2',
            default_value=config_rviz2,
            description="Path to config for RViz2. If empty, RViz2 will be disabled"),
        DeclareLaunchArgument(
            'log_level',
            default_value=log_level,
            description="Log level of all nodes launched by this script"),

        # MoveIt2 move_group action server with necessary ROS2 <-> Ignition bridges
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(
                [os.path.join(get_package_share_directory('ign_moveit2'),
                              'launch', 'ign_moveit2.launch.py')]),
            launch_arguments=[('use_sim_time', use_sim_time),
                              ('config_rviz2', config_rviz2),
                              ('log_level', log_level)]),

        # JointTrajectory bridge for gripper (ROS2 -> IGN)
        Node(package='ros_ign_bridge',
             executable='parameter_bridge',
             name='parameter_bridge_gripper_trajectory',
             output='screen',
             arguments=['/gripper_trajectory@trajectory_msgs/msg/JointTrajectory]ignition.msgs.JointTrajectory',
                        '--ros-args', '--log-level', log_level],
             parameters=[{'use_sim_time': use_sim_time}])
    ])
| 43.134615 | 112 | 0.66741 |
3fa6b76150340746bcd10f60ca49a627b7397a36 | 3,801 | py | Python | sphinxcontrib/needs/directives/needextract.py | gregegg/sphinxcontrib-needs | b0c10a44756bb8f16313dcf52e17fd87cf47e780 | [
"MIT"
] | null | null | null | sphinxcontrib/needs/directives/needextract.py | gregegg/sphinxcontrib-needs | b0c10a44756bb8f16313dcf52e17fd87cf47e780 | [
"MIT"
] | null | null | null | sphinxcontrib/needs/directives/needextract.py | gregegg/sphinxcontrib-needs | b0c10a44756bb8f16313dcf52e17fd87cf47e780 | [
"MIT"
] | null | null | null | """
"""
import sys
import urllib
from docutils import nodes
from docutils.parsers.rst import directives
from sphinxcontrib.needs.layout import create_need
from sphinxcontrib.needs.filter_common import FilterBase, procces_filters
from sphinxcontrib.needs.directives.utils import no_needs_found_paragraph, used_filter_paragraph
if sys.version_info.major < 3:
urlParse = urllib.quote_plus
else:
urlParse = urllib.parse.quote_plus
class Needextract(nodes.General, nodes.Element):
    """Placeholder docutils node; replaced with the extracted needs later
    during doctree resolution (see process_needextract)."""
    pass
class NeedextractDirective(FilterBase):
    """
    Directive to filter needs and present them as normal needs with given layout and style.
    """
    option_spec = {'layout': directives.unchanged_required,
                   'style': directives.unchanged_required,
                   'show_filters': directives.flag,
                   }

    # Update the options_spec with values defined in the FilterBase class
    option_spec.update(FilterBase.base_option_spec)

    def run(self):
        """Record the directive's options in the Sphinx build environment and
        leave a target node plus a Needextract placeholder in the doctree."""
        env = self.state.document.settings.env
        if not hasattr(env, 'need_all_needextracts'):
            env.need_all_needextracts = {}

        # be sure, global var is available. If not, create it
        if not hasattr(env, 'needs_all_needs'):
            env.needs_all_needs = {}

        # Unique per-document id for this needextract instance.
        targetid = "needextract-{docname}-{id}".format(
            docname=env.docname,
            id=env.new_serialno('needextract'))
        targetnode = nodes.target('', '', ids=[targetid])

        # Add the need and all needed information
        env.need_all_needextracts[targetid] = {
            'docname': env.docname,
            'lineno': self.lineno,
            'target_node': targetnode,
            'env': env,
            'export_id': self.options.get("export_id", ""),
            'layout': self.options.get("layout", None),
            'style': self.options.get("style", None),
            # directives.flag stores None when the flag is present in the
            # document, hence the "is None" check here.
            'show_filters': True if self.options.get("show_filters", False) is None else False,
        }
        env.need_all_needextracts[targetid].update(self.collect_filter_attributes())

        return [targetnode] + [Needextract('')]
def process_needextract(app, doctree, fromdocname):
    """
    Replace all needextract nodes with a list of the collected needs.

    Runs during doctree resolution: for every ``Needextract`` placeholder,
    the stored filter options are applied to all known needs and each match
    is rendered in place with the requested layout/style.
    """
    env = app.builder.env

    for node in doctree.traverse(Needextract):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any
            # attributes. But this is here the case, because we are using the
            # attribute "ids" of a node. However, I do not understand, why
            # losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ('ids', 'names', 'classes', 'dupnames'):
                node[att] = []
            node.replace_self([])
            continue

        # Renamed from "id" to avoid shadowing the builtin of the same name.
        target_id = node.attributes["ids"][0]
        current_needextract = env.need_all_needextracts[target_id]
        all_needs = env.needs_all_needs
        content = []

        all_needs = list(all_needs.values())
        found_needs = procces_filters(all_needs, current_needextract)

        for need_info in found_needs:
            # Render each matching need with the layout/style requested by
            # the directive (falling back to defaults when None).
            need_extract = create_need(need_info['id'], app,
                                       layout=current_needextract['layout'],
                                       style=current_needextract['style'],
                                       docname=current_needextract['docname'])
            content.append(need_extract)

        if len(content) == 0:
            content.append(no_needs_found_paragraph())

        if current_needextract["show_filters"]:
            content.append(used_filter_paragraph(current_needextract))

        node.replace_self(content)
| 35.858491 | 111 | 0.629045 |
2eafe96c7592b46bd4499f2df33d983c0e63dc1c | 2,470 | py | Python | trove/tests/unittests/common/test_secure_serializer.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 244 | 2015-01-01T12:04:44.000Z | 2022-03-25T23:38:39.000Z | trove/tests/unittests/common/test_secure_serializer.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 6 | 2015-08-18T08:19:10.000Z | 2022-03-05T02:32:36.000Z | trove/tests/unittests/common/test_secure_serializer.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 178 | 2015-01-02T15:16:58.000Z | 2022-03-23T03:30:20.000Z | # Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from trove.common.rpc import secure_serializer as ssz
from trove.tests.unittests import trove_testtools
class TestSecureSerializer(unittest.TestCase):
    """Unit tests for trove.common.rpc.secure_serializer.SecureSerializer."""

    def setUp(self):
        # Fixed key/payload/context used by all tests below.
        self.key = 'xuUyAKn5mDANoM5sRxQsb6HGiugWVD'
        self.data = '5rzFfaKU630rRxL1g3c80EHnHDf534'
        self.context = {'fld1': 3, 'fld2': 'abc'}
        super(TestSecureSerializer, self).setUp()

    def tearDown(self):
        super(TestSecureSerializer, self).tearDown()

    def test_sz_nokey_serialize_entity(self):
        # Without a key, serialization is a pass-through.
        sz = ssz.SecureSerializer(base=None, key=None)
        en = sz.serialize_entity(self.context, self.data)
        self.assertEqual(en, self.data)

    def test_sz_nokey_deserialize_entity(self):
        sz = ssz.SecureSerializer(base=None, key=None)
        en = sz.deserialize_entity(self.context, self.data)
        self.assertEqual(en, self.data)

    def test_sz_nokey_serialize_context(self):
        sz = ssz.SecureSerializer(base=None, key=None)
        en = sz.serialize_context(self.context)
        self.assertEqual(en, self.context)

    def test_sz_nokey_deserialize_context(self):
        sz = ssz.SecureSerializer(base=None, key=None)
        en = sz.deserialize_context(self.context)
        self.assertEqual(en, self.context)

    def test_sz_entity(self):
        # With a key, the serialized form differs but round-trips losslessly.
        sz = ssz.SecureSerializer(base=None, key=self.key)
        en = sz.serialize_entity(self.context, self.data)
        self.assertNotEqual(en, self.data)
        self.assertEqual(sz.deserialize_entity(self.context, en),
                         self.data)

    def test_sz_context(self):
        sz = ssz.SecureSerializer(base=None, key=self.key)
        sctxt = sz.serialize_context(self.context)
        self.assertNotEqual(sctxt, self.context)
        self.assertEqual(sz.deserialize_context(sctxt),
                         self.context)
| 38 | 78 | 0.689879 |
9a2b63caaa2ed27d7c0e5097716cb0c6314f7a34 | 3,803 | py | Python | ls/joyous/tests/test_holidays.py | Pandevmonium/ls.joyous | 53da85c8d979850eae06019e65d0e9fc61620acc | [
"BSD-3-Clause"
] | null | null | null | ls/joyous/tests/test_holidays.py | Pandevmonium/ls.joyous | 53da85c8d979850eae06019e65d0e9fc61620acc | [
"BSD-3-Clause"
] | null | null | null | ls/joyous/tests/test_holidays.py | Pandevmonium/ls.joyous | 53da85c8d979850eae06019e65d0e9fc61620acc | [
"BSD-3-Clause"
] | null | null | null | # ------------------------------------------------------------------------------
# Test Holidays
# ------------------------------------------------------------------------------
import sys
import datetime as dt
from unittest.mock import Mock
from django.conf import settings
from django.test import TestCase, override_settings
from ls.joyous.models.calendar import CalendarPage
from ls.joyous.models.events import SimpleEventPage
from ls.joyous.holidays import Holidays
from ls.joyous.holidays.parser import parseHolidays, _parseSubdivisions
from .testutils import freeze_timetz, getPage
# ------------------------------------------------------------------------------
class TestHolidays(TestCase):
    """Tests for the Holidays aggregation class."""

    @override_settings()
    def testNoSettings(self):
        # With JOYOUS_HOLIDAYS deleted there are no configured sources.
        del settings.JOYOUS_HOLIDAYS
        hols = Holidays()
        self.assertEqual(hols.simple, {})
        self.assertEqual(hols.srcs, [{}])
        self.assertEqual(hols.get(dt.date(1999,4,25)), "")

    def testNZSetting(self):
        # Uses the configured JOYOUS_HOLIDAYS setting (not overridden here);
        # expects NZ's Anzac Day to be known.
        hols = Holidays()
        self.assertEqual(hols.get(dt.date(1999,4,25)), "Anzac Day")

    @override_settings(JOYOUS_HOLIDAYS = None)
    def testSimple(self):
        # Holidays added via add() are returned by get().
        hols = Holidays()
        hols.add(dt.date(1999,4,29), "HAPPY HAPPY")
        self.assertEqual(hols.get(dt.date(1999,4,29)), "HAPPY HAPPY")

    @override_settings(JOYOUS_HOLIDAYS = None)
    def testWorkalendar(self):
        # Any object exposing get_holiday_label(date) can be registered as an
        # additional holiday source (e.g. a workalendar calendar).
        class Woral:
            get_holiday_label = Mock(return_value="JOY JOY")
        woral = Woral()
        hols = Holidays()
        hols.register(woral)
        self.assertEqual(hols.srcs, [{}, woral])
        self.assertEqual(hols.get(dt.date(1999,4,30)), "JOY JOY")
        woral.get_holiday_label.assert_called_with(dt.date(1999,4,30))

    def testMultiHolidays(self):
        # Multiple names falling on the same date are joined with ", ".
        hols = Holidays()
        hols.add(dt.date(1999,1,1), "Gliffy")
        hols.add(dt.date(1999,1,1), "Whatnot")
        self.assertEqual(hols.get(dt.date(1999,1,1)),
                         "Gliffy, Whatnot, New Year's Day")
# ------------------------------------------------------------------------------
class TestParser(TestCase):
    """Tests for parseHolidays / _parseSubdivisions."""

    def testScotland(self):
        hols = parseHolidays("Scotland")
        self.assertEqual(hols.get(dt.date(2019,11,30)), "St. Andrew's Day")

    def testAllCountries(self):
        # "*" should register every country class known to the holidays map.
        from ls.joyous.holidays.parser import _PYTHON_HOLIDAYS_MAP
        hols = parseHolidays("*")
        classes = [hol.__class__ for hol in hols.holidays if hol.country]
        self.assertCountEqual(classes, _PYTHON_HOLIDAYS_MAP.values())

    def testCountriesNE(self):
        # "*[NE]" selects subdivision "NE" in every country that has one.
        hols = parseHolidays("*[NE]")
        self.assertEqual(hols.get(dt.date(2019,3,1)),
                         "Jahrestag der Ausrufung der Republik")
        self.assertEqual(hols.get(dt.date(2019,4,26)),
                         "Arbor Day")

    def testNorthIsland(self):
        # Multiple comma-separated subdivision codes in one expression.
        hols = parseHolidays("NZ[NTL,AUK,HKB,TKI,WGN]")
        self.assertEqual(hols.get(dt.date(2020,1,20)),
                         "Wellington Anniversary Day")
        self.assertEqual(hols.get(dt.date(2020,1,27)),
                         "Auckland Anniversary Day")
        self.assertEqual(hols.get(dt.date(2020,3,9)),
                         "Taranaki Anniversary Day")
        self.assertEqual(hols.get(dt.date(2020,10,23)),
                         "Hawke's Bay Anniversary Day")

    def testInvalidCountry(self):
        # Unknown country names yield None rather than raising.
        self.assertIsNone(parseHolidays("Ruritania"))

    def testInvalidSubdivision(self):
        from holidays import UK
        # Unknown subdivision codes contribute zero holiday sources.
        self.assertEqual(_parseSubdivisions("ZZZ", UK), 0)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
| 40.892473 | 80 | 0.545359 |
0686c37b41766b0dac19a06ece35911c162f8bea | 468 | py | Python | utils/summarizer_eager.py | yigitozgumus/IACV_Project | 0e012139a33c76ca88505c28270f1250181ec701 | [
"MIT"
] | 3 | 2019-07-27T14:00:42.000Z | 2020-01-17T17:07:51.000Z | utils/summarizer_eager.py | yigitozgumus/IACV_Project | 0e012139a33c76ca88505c28270f1250181ec701 | [
"MIT"
] | null | null | null | utils/summarizer_eager.py | yigitozgumus/IACV_Project | 0e012139a33c76ca88505c28270f1250181ec701 | [
"MIT"
] | 4 | 2019-10-22T02:58:26.000Z | 2020-10-06T09:59:26.000Z | import tensorflow as tf
import os
class Summarizer_eager:
    """Holds TensorBoard summary file writers (eager mode) for train/test."""

    def __init__(self, config):
        self.config = config
        self.summary_placeholders = {}
        self.summary_ops = {}
        # One writer per split, both rooted at the configured summary dir.
        summary_root = self.config.log.summary_dir
        self.train_summary_writer = tf.summary.create_file_writer(
            os.path.join(summary_root, "train"))
        self.test_summary_writer = tf.summary.create_file_writer(
            os.path.join(summary_root, "test"))
| 29.25 | 66 | 0.647436 |
c850ff4ce67150f989d53c53bf8ae46052ab577d | 1,676 | py | Python | tests/test_weight_init.py | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 | [
"Apache-2.0"
] | null | null | null | tests/test_weight_init.py | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 | [
"Apache-2.0"
] | null | null | null | tests/test_weight_init.py | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.networks.layers import trunc_normal_
# Valid (kwargs, tensor shape) pairs for trunc_normal_.
TEST_CASES = [
    [{"mean": 0.0, "std": 1.0, "a": 2, "b": 4}, (6, 12, 3, 1, 7)],
    [{"mean": 0.3, "std": 0.6, "a": -1.0, "b": 1.3}, (1, 4, 4, 4)],
    [{"mean": 0.1, "std": 0.4, "a": 1.3, "b": 1.8}, (5, 7, 7, 8, 9)],
]

# Invalid parameter sets (a > b, negative std, zero std); a ValueError is
# expected for each (see TestWeightInit.test_ill_arg).
TEST_ERRORS = [
    [{"mean": 0.0, "std": 1.0, "a": 5, "b": 1.1}, (1, 1, 8, 8, 8)],
    [{"mean": 0.3, "std": -0.1, "a": 1.0, "b": 2.0}, (8, 5, 2, 6, 9)],
    [{"mean": 0.7, "std": 0.0, "a": 0.1, "b": 2.0}, (4, 12, 23, 17)],
]
class TestWeightInit(unittest.TestCase):
    """Unit tests for monai.networks.layers.trunc_normal_."""

    @parameterized.expand(TEST_CASES)
    def test_shape(self, input_param, input_shape):
        # trunc_normal_ initialises the tensor in place; its shape must be
        # unchanged afterwards.
        im = torch.rand(input_shape)
        trunc_normal_(im, **input_param)
        self.assertEqual(im.shape, input_shape)

    @parameterized.expand(TEST_ERRORS)
    def test_ill_arg(self, input_param, input_shape):
        # Invalid mean/std/bound combinations must raise ValueError.
        with self.assertRaises(ValueError):
            im = torch.rand(input_shape)
            trunc_normal_(im, **input_param)
if __name__ == "__main__":
unittest.main()
| 34.916667 | 74 | 0.631862 |
c4d526ce389e11b3c3c1dd66b8fe29d88ceebc71 | 1,150 | py | Python | python/src/nnabla/backward_function/quantize_linear.py | isabella232/nnabla | 82a3c6fed382f889d1a4a429c696bb8cedf6ce79 | [
"Apache-2.0"
] | null | null | null | python/src/nnabla/backward_function/quantize_linear.py | isabella232/nnabla | 82a3c6fed382f889d1a4a429c696bb8cedf6ce79 | [
"Apache-2.0"
] | null | null | null | python/src/nnabla/backward_function/quantize_linear.py | isabella232/nnabla | 82a3c6fed382f889d1a4a429c696bb8cedf6ce79 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def quantize_linear_backward(inputs, round_mode='HALF_AWAY_FROM_ZERO', narrow_range=False, dtype=1):
    """
    Args:
        inputs (list of nn.Variable): Incomming grads/inputs to/of the forward function.
        kwargs (dict of arguments): Dictionary of the corresponding function arguments.

    Return:
        list of Variable: Return the gradients wrt inputs of the corresponding function.
    """
    # Unpack: output grad, data input, scale, zero-point. Only the data input
    # receives a gradient (dy divided by the scale); scale and zero-point are
    # not differentiated, so None is returned for them.
    dy, _x0, scale, _zero_point = inputs[0], inputs[1], inputs[2], inputs[3]
    grad_x0 = dy / scale
    return grad_x0, None, None
| 35.9375 | 100 | 0.724348 |
9b2eca8cf16c260a9ebaab5cef349d3c3a0ee168 | 1,534 | py | Python | Projects/Weather Check/Weather.py | criox4/Contribute-to-HacktoberFest2021 | be989c6d82b577fa0a6bc4692a74965df78ab80c | [
"MIT"
] | null | null | null | Projects/Weather Check/Weather.py | criox4/Contribute-to-HacktoberFest2021 | be989c6d82b577fa0a6bc4692a74965df78ab80c | [
"MIT"
] | null | null | null | Projects/Weather Check/Weather.py | criox4/Contribute-to-HacktoberFest2021 | be989c6d82b577fa0a6bc4692a74965df78ab80c | [
"MIT"
] | null | null | null |
from configparser import ConfigParser
import requests
from tkinter import *
from tkinter import messagebox
# Read the OpenWeatherMap API key from config.ini (section [gfg], key 'api').
config_file = "config.ini"
config = ConfigParser()
config.read(config_file)
api_key = config['gfg']['api']

# Endpoint template, filled with the city name and the API key by getweather().
url = 'http://api.openweathermap.org/data/2.5/weather?q={}&appid={}'
def getweather(city):
    """Fetch the current weather for *city* from OpenWeatherMap.

    Returns:
        list | None: ``[city, country, temp_kelvin, temp_celsius, condition]``
        on success; ``None`` (after printing a message) when the request fails.
    """
    result = requests.get(url.format(city, api_key))
    if result:
        payload = result.json()
        city = payload['name']
        # Bug fix: the country code lives at payload['sys']['country']; the
        # whole 'sys' dict was previously stored here, so the UI rendered a
        # dict repr instead of the country code.
        country = payload['sys']['country']
        temp_kelvin = payload['main']['temp']
        temp_celsius = temp_kelvin - 273.15
        condition = payload['weather'][0]['main']
        return [city, country, temp_kelvin, temp_celsius, condition]
    else:
        print("NO Content Found")
# explicit function to
# search city
def search():
    """Look up the city typed in the entry box and update the UI labels."""
    city = city_text.get()
    weather = getweather(city)
    # getweather returns None (falsy) when the API request failed.
    if weather:
        location_lbl['text'] = '{} ,{}'.format(weather[0], weather[1])
        temperature_label['text'] = str(weather[3])+" Degree Celsius"
        weather_l['text'] = weather[4]
    else:
        messagebox.showerror('Error', "Cannot find {}".format(city))
# --- Tkinter UI setup (module level) ---
app = Tk()
app.title("Weather App")
app.geometry("400x400")

# Entry bound to a StringVar so search() can read the typed city name.
city_text = StringVar()
city_entry = Entry(app, textvariable=city_text)
city_entry.pack()

Search_btn = Button(app, text="Search",width=20, command=search)
Search_btn.pack()

# Labels updated in place by search() with the fetched weather data.
# NOTE(review): font is given a set literal here; tkinter usually takes a
# tuple like ('bold', 40) — confirm this renders as intended.
location_lbl = Label(app, text="Location", font={'bold', 40})
location_lbl.pack()
temperature_label = Label(app, text="")
temperature_label.pack()
weather_l = Label(app, text="")
weather_l.pack()

app.mainloop()
9e0607bd11e52dcb38793afcb1321faae8300a6b | 3,839 | py | Python | models/configs.py | yanghongji2007/cross_view_localization_EtoTR | 5b9e89027c69a5071955450ca3e5b10315393120 | [
"MIT"
] | 16 | 2021-11-19T03:06:52.000Z | 2022-03-16T13:32:59.000Z | models/configs.py | yanghongji2007/cross_view_localization_EtoTR | 5b9e89027c69a5071955450ca3e5b10315393120 | [
"MIT"
] | 3 | 2021-12-07T06:49:13.000Z | 2022-01-01T07:56:45.000Z | models/configs.py | yanghongji2007/cross_view_localization_L2LTR | 5b9e89027c69a5071955450ca3e5b10315393120 | [
"MIT"
] | null | null | null |
import ml_collections
def get_testing():
    """Returns a minimal configuration for testing."""
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
    # All model dimensions are 1 so tests construct a tiny, fast model.
    config.hidden_size = 1
    config.transformer = ml_collections.ConfigDict()
    config.transformer.mlp_dim = 1
    config.transformer.num_heads = 1
    config.transformer.num_layers = 1
    config.transformer.attention_dropout_rate = 0.0
    config.transformer.dropout_rate = 0.1
    config.classifier = 'token'
    config.representation_size = None
    return config
def get_b16_config():
    """Returns the ViT-B/16 configuration."""
    # Build the transformer sub-config first, then attach it to the root.
    transformer = ml_collections.ConfigDict()
    transformer.mlp_dim = 3072
    transformer.num_heads = 12
    transformer.num_layers = 12
    transformer.attention_dropout_rate = 0.0
    transformer.dropout_rate = 0.1

    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
    config.hidden_size = 768
    config.transformer = transformer
    config.classifier = 'token'
    config.representation_size = None
    return config
def get_r50_b16_config():
    """Returns the Resnet50 + ViT-B/16 configuration."""
    config = get_b16_config()
    # Hybrid model: a ResNet feature map replaces raw patches, so the patch
    # 'size' is swapped for a feature-map 'grid'.
    del config.patches.size
    config.patches.grid = (8, 32)
    config.resnet = ml_collections.ConfigDict()
    config.resnet.num_layers = (3, 4, 9)
    config.resnet.width_factor = 1
    return config
def get_r50_b32_config():
    """Returns the Resnet50 + ViT-B/32 configuration."""
    # (Docstring fixed: this builds on get_b32_config, not ViT-L/16.)
    config = get_b32_config()
    del config.patches.size
    config.patches.grid = (4, 16)
    config.resnet = ml_collections.ConfigDict()
    config.resnet.num_layers = (3, 4, 6, 3)
    config.resnet.width_factor = 1
    return config
def get_r50_l16_config():
    """Returns the Resnet50 + ViT-L/16 configuration."""
    config = get_l16_config()
    # Hybrid model: patch 'size' is replaced by a ResNet feature-map 'grid'.
    del config.patches.size
    config.patches.grid = (8, 32)
    config.resnet = ml_collections.ConfigDict()
    config.resnet.num_layers = (3, 4, 9)
    config.resnet.width_factor = 1
    return config
def get_r50_l32_config():
    """Returns the Resnet50 + ViT-L/32 configuration."""
    config = get_l32_config()
    # Hybrid model: patch 'size' is replaced by a ResNet feature-map 'grid'.
    del config.patches.size
    config.patches.grid = (4, 16)
    config.resnet = ml_collections.ConfigDict()
    config.resnet.num_layers = (3, 4, 6, 3)
    config.resnet.width_factor = 1
    return config
def get_b32_config():
    """Returns the ViT-B/32 configuration."""
    # Identical to ViT-B/16 except for the larger patch size.
    base = get_b16_config()
    base.patches.size = (32, 32)
    return base
def get_l16_config():
    """Returns the ViT-L/16 configuration."""
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
    # "Large" variant: wider hidden size and deeper transformer than ViT-B.
    config.hidden_size = 1024
    config.transformer = ml_collections.ConfigDict()
    config.transformer.mlp_dim = 4096
    config.transformer.num_heads = 16
    config.transformer.num_layers = 24
    config.transformer.attention_dropout_rate = 0.0
    config.transformer.dropout_rate = 0.1
    config.classifier = 'token'
    config.representation_size = None
    return config
def get_l32_config():
    """Returns the ViT-L/32 configuration."""
    # Identical to ViT-L/16 except for the larger patch size.
    cfg = get_l16_config()
    cfg.patches.size = (32, 32)
    return cfg
def get_h14_config():
    """Returns the ViT-H/14 configuration."""
    # (Docstring fixed: this is the "Huge" variant with 14x14 patches.)
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (14, 14)})
    config.hidden_size = 1280
    config.transformer = ml_collections.ConfigDict()
    config.transformer.mlp_dim = 5120
    config.transformer.num_heads = 16
    config.transformer.num_layers = 32
    config.transformer.attention_dropout_rate = 0.0
    config.transformer.dropout_rate = 0.1
    config.classifier = 'token'
    config.representation_size = None
    return config
| 30.959677 | 66 | 0.700443 |
f0a36fdfabedb5317a920cc1cb8e7b6aeabadc11 | 630 | py | Python | artemis/generators/simutable/providers/normal/__init__.py | artemis-analytics/artemis | 3e1eebdd4628145ee7d8923567b5e6f53a2e5244 | [
"Apache-2.0"
] | 4 | 2020-02-29T15:02:05.000Z | 2021-05-13T18:50:58.000Z | artemis/generators/simutable/providers/normal/__init__.py | artemis-analytics/artemis | 3e1eebdd4628145ee7d8923567b5e6f53a2e5244 | [
"Apache-2.0"
] | 25 | 2020-02-25T19:29:21.000Z | 2020-04-03T15:06:59.000Z | artemis/generators/simutable/providers/normal/__init__.py | ryanmwhitephd/artemis | 3e1eebdd4628145ee7d8923567b5e6f53a2e5244 | [
"Apache-2.0"
] | 2 | 2021-08-12T09:40:51.000Z | 2021-08-12T09:42:09.000Z | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Ryan Mackenzie White <ryan.white4@canada.ca>
#
"""
faker provider for creating a normal distribution
"""
import unittest
from faker import Faker
from faker.providers import BaseProvider
class Provider(BaseProvider):
    """Faker provider that draws samples from a normal distribution."""

    def normal(self, mu=0, sigma=1):
        """Return one sample from N(mu, sigma).

        The defaults preserve the original behaviour (standard normal) while
        letting callers choose the distribution parameters.

        Args:
            mu: Mean of the distribution (default 0).
            sigma: Standard deviation of the distribution (default 1).
        """
        return self.generator.random.normalvariate(mu, sigma)
class TestCase(unittest.TestCase):
    """Smoke test: register the provider on a Faker instance and sample once."""

    def test(self):
        fake = Faker()
        provider = Provider(fake)
        fake.add_provider(provider)
        # No assertion: just exercises the provider end to end.
        print(fake.normal())
if __name__ == "__main__":
unittest.main()
| 17.5 | 63 | 0.655556 |
19fa5c603bfafe16ed151e10fa8eb11a79106ede | 20,322 | py | Python | src/finn/transformation/fpgadataflow/create_stitched_ip.py | rbcarlos/finn | ffb1d66ae4a9dd0d4831b2f0a5c057aff9aeae5a | [
"BSD-3-Clause"
] | 1 | 2021-03-12T17:20:09.000Z | 2021-03-12T17:20:09.000Z | src/finn/transformation/fpgadataflow/create_stitched_ip.py | surangamh/finn | af783db8dc2a1d2e95bd569d39464b935520b6d2 | [
"BSD-3-Clause"
] | null | null | null | src/finn/transformation/fpgadataflow/create_stitched_ip.py | surangamh/finn | af783db8dc2a1d2e95bd569d39464b935520b6d2 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import warnings
import subprocess
from finn.transformation.base import Transformation
from finn.util.basic import get_by_name, make_build_dir, is_finn_op
from finn.custom_op.registry import getCustomOp
from finn.util.basic import get_num_default_workers
import multiprocessing as mp
from finn.transformation.fpgadataflow.replace_verilog_relpaths import (
ReplaceVerilogRelPaths,
)
class CreateStitchedIP(Transformation):
"""Create a Vivado IP Block Design project from all the generated IPs of a
graph. All nodes in the graph must have the fpgadataflow backend attribute,
and the PrepareIP transformation must have been previously run on
the graph. The resulting block design is also packaged as IP. The
transformation gets the fpgapart as a string.
Outcome if successful: sets the vivado_stitch_proj attribute in the ONNX
ModelProto's metadata_props field, with the created project dir as the
value. A make_project.tcl script is also placed under the same folder,
which is called to instantiate the per-layer IPs and stitch them together.
The packaged block design IP can be found under the ip subdirectory.
"""
    def __init__(self, fpgapart, clk_ns, ip_name="finn_design", vitis=False):
        """Set up transformation state.

        Args:
            fpgapart: FPGA part string passed to the Vivado project.
            clk_ns: Target clock period in nanoseconds; values other than
                5.0/10.0/20.0 trigger a warning about clock divider limits.
            ip_name: Name for the packaged block-design IP.
            vitis: Whether the result targets a Vitis flow.
        """
        super().__init__()
        self.fpgapart = fpgapart
        self.clk_ns = clk_ns
        self.ip_name = ip_name
        self.vitis = vitis
        if float(clk_ns) not in [5.0, 10.0, 20.0]:
            warnings.warn(
                """The chosen frequency may lead to failure due to clock divider
                constraints."""
            )
        # Flags/counters tracking which top-level interfaces exist so far.
        self.has_aximm = False
        self.has_m_axis = False
        self.m_axis_idx = 0
        self.has_s_axis = False
        self.s_axis_idx = 0
        self.clock_reset_are_external = False
        # Accumulated TCL commands: IP instantiation vs. wiring.
        self.create_cmds = []
        self.connect_cmds = []
        # keep track of top-level interface names
        self.intf_names = {
            "clk": [],
            "rst": [],
            "s_axis": [],
            "m_axis": [],
            "aximm": [],
            "axilite": [],
        }
def connect_clk_rst(self, node):
inst_name = node.name
node_inst = getCustomOp(node)
clock_intf_name = node_inst.get_verilog_top_module_intf_names()["clk"][0]
reset_intf_name = node_inst.get_verilog_top_module_intf_names()["rst"][0]
# make clock and reset external, if they aren't already
if not self.clock_reset_are_external:
self.connect_cmds.append(
"make_bd_pins_external [get_bd_pins %s/%s]"
% (inst_name, clock_intf_name)
)
self.connect_cmds.append("set_property name ap_clk [get_bd_ports ap_clk_0]")
self.connect_cmds.append(
"make_bd_pins_external [get_bd_pins %s/%s]"
% (inst_name, reset_intf_name)
)
self.connect_cmds.append(
"set_property name ap_rst_n [get_bd_ports ap_rst_n_0]"
)
self.clock_reset_are_external = True
self.intf_names["clk"] = ["ap_clk"]
self.intf_names["rst"] = ["ap_rst_n"]
# otherwise connect clock and reset
else:
self.connect_cmds.append(
"connect_bd_net [get_bd_ports ap_rst_n] [get_bd_pins %s/%s]"
% (inst_name, reset_intf_name)
)
self.connect_cmds.append(
"connect_bd_net [get_bd_ports ap_clk] [get_bd_pins %s/%s]"
% (inst_name, clock_intf_name)
)
def connect_axi(self, node):
inst_name = node.name
node_inst = getCustomOp(node)
axilite_intf_name = node_inst.get_verilog_top_module_intf_names()["axilite"]
aximm_intf_name = node_inst.get_verilog_top_module_intf_names()["aximm"]
if len(axilite_intf_name) != 0:
self.connect_cmds.append(
"make_bd_intf_pins_external "
"[get_bd_intf_pins %s/%s]" % (inst_name, axilite_intf_name[0])
)
ext_if_name = "%s_%d" % (
axilite_intf_name[0],
len(self.intf_names["axilite"]),
)
self.intf_names["axilite"].append(ext_if_name)
if len(aximm_intf_name) != 0:
self.connect_cmds.append(
"make_bd_intf_pins_external [get_bd_intf_pins %s/%s]"
% (inst_name, aximm_intf_name[0])
)
self.connect_cmds.append(
"set_property name m_axi_gmem0 [get_bd_intf_ports m_axi_gmem_0]"
)
self.intf_names["aximm"] = ["m_axi_gmem0"]
assert self.has_aximm is False, "Currently limited to one AXI-MM interface"
self.has_aximm = True
def connect_m_axis_external(self, node):
inst_name = node.name
node_inst = getCustomOp(node)
output_intf_names = node_inst.get_verilog_top_module_intf_names()["m_axis"]
# make output axis external
for output_intf_name in output_intf_names:
self.connect_cmds.append(
"make_bd_intf_pins_external [get_bd_intf_pins %s/%s]"
% (inst_name, output_intf_name)
)
self.connect_cmds.append(
"set_property name m_axis_%d [get_bd_intf_ports %s_0]"
% (self.m_axis_idx, output_intf_name)
)
self.has_m_axis = True
self.intf_names["m_axis"].append("m_axis_%d" % self.m_axis_idx)
self.m_axis_idx += 1
def connect_s_axis_external(self, node):
inst_name = node.name
node_inst = getCustomOp(node)
input_intf_names = node_inst.get_verilog_top_module_intf_names()["s_axis"]
# make input axis external
for input_intf_name in input_intf_names:
self.connect_cmds.append(
"make_bd_intf_pins_external [get_bd_intf_pins %s/%s]"
% (inst_name, input_intf_name)
)
self.connect_cmds.append(
"set_property name s_axis_%d [get_bd_intf_ports %s_0]"
% (self.s_axis_idx, input_intf_name)
)
self.has_s_axis = True
self.intf_names["s_axis"].append("s_axis_%d" % self.s_axis_idx)
self.s_axis_idx += 1
def apply(self, model):
# ensure non-relative readmemh .dat files
model = model.transform(ReplaceVerilogRelPaths())
ip_dirs = ["list"]
# add RTL streamer IP
ip_dirs.append("/workspace/finn/finn-rtllib/memstream")
# ensure that all nodes are fpgadataflow, and that IPs are generated
for node in model.graph.node:
assert is_finn_op(node.domain), "Found non-FINN node"
backend_attribute = get_by_name(node.attribute, "backend")
assert backend_attribute is not None, "Backend node attribute is not set."
backend_value = backend_attribute.s.decode("UTF-8")
assert (
backend_value == "fpgadataflow"
), """Backend node attribute is not
set to "fpgadataflow"."""
node_inst = getCustomOp(node)
ip_dir_value = node_inst.get_nodeattr("ip_path")
assert os.path.isdir(ip_dir_value), "IP generation directory doesn't exist."
ip_dirs += [ip_dir_value]
self.create_cmds += node_inst.code_generation_ipi()
my_producer = model.find_producer(node.input[0])
self.connect_clk_rst(node)
self.connect_axi(node)
if my_producer is None:
# first node in graph
self.connect_s_axis_external(node)
if node.op_type == "TLastMarker":
assert (
node_inst.get_nodeattr("Direction") == "in"
), """Output TLastMarker incorrect direction"""
elif node.op_type == "IODMA" and len(model.graph.node) != 1:
# don't apply this check for a 1-node partition
assert (
node_inst.get_nodeattr("direction") == "in"
), """Input DMA incorrect direction"""
else:
# intermediate node
# wire up input(s) to previous node output(s)
# foreach input
# find producer
# find index of producer output connected to our target input
# get names of hdl interfaces for input and producer output
# issue a TCL directive to connect input to output
# if FC layer with mode "decoupled", add a streamer on input 1
for i in range(len(node.input)):
producer = model.find_producer(node.input[i])
if producer is None:
continue
j = list(producer.output).index(node.input[i])
src_intf_name = getCustomOp(
producer
).get_verilog_top_module_intf_names()["m_axis"][j]
dst_intf_name = node_inst.get_verilog_top_module_intf_names()[
"s_axis"
][i]
self.connect_cmds.append(
"connect_bd_intf_net [get_bd_intf_pins %s/%s] "
"[get_bd_intf_pins %s/%s]"
% (producer.name, src_intf_name, node.name, dst_intf_name)
)
if model.find_consumers(node.output[0]) is None:
# last node in graph
self.connect_m_axis_external(node)
if node.op_type == "TLastMarker":
assert (
node_inst.get_nodeattr("Direction") == "out"
), """Output TLastMarker incorrect direction"""
elif node.op_type == "IODMA" and len(model.graph.node) != 1:
assert (
node_inst.get_nodeattr("direction") == "out"
), """Output DMA incorrect direction"""
# create a temporary folder for the project
prjname = "finn_vivado_stitch_proj"
vivado_stitch_proj_dir = make_build_dir(prefix="vivado_stitch_proj_")
model.set_metadata_prop("vivado_stitch_proj", vivado_stitch_proj_dir)
# start building the tcl script
tcl = []
# create vivado project
tcl.append(
"create_project %s %s -part %s"
% (prjname, vivado_stitch_proj_dir, self.fpgapart)
)
# add all the generated IP dirs to ip_repo_paths
ip_dirs_str = " ".join(ip_dirs)
tcl.append("set_property ip_repo_paths [%s] [current_project]" % ip_dirs_str)
tcl.append("update_ip_catalog")
# create block design and instantiate all layers
block_name = self.ip_name
tcl.append('create_bd_design "%s"' % block_name)
tcl.extend(self.create_cmds)
tcl.extend(self.connect_cmds)
fclk_mhz = 1 / (self.clk_ns * 0.001)
fclk_hz = fclk_mhz * 1000000
model.set_metadata_prop("clk_ns", str(self.clk_ns))
tcl.append("set_property CONFIG.FREQ_HZ %f [get_bd_ports /ap_clk]" % fclk_hz)
tcl.append("regenerate_bd_layout")
tcl.append("validate_bd_design")
tcl.append("save_bd_design")
# create wrapper hdl (for rtlsim later on)
bd_base = "%s/%s.srcs/sources_1/bd/%s" % (
vivado_stitch_proj_dir,
prjname,
block_name,
)
bd_filename = "%s/%s.bd" % (bd_base, block_name)
tcl.append("make_wrapper -files [get_files %s] -top" % bd_filename)
wrapper_filename = "%s/hdl/%s_wrapper.v" % (bd_base, block_name)
tcl.append("add_files -norecurse %s" % wrapper_filename)
model.set_metadata_prop("wrapper_filename", wrapper_filename)
# synthesize to DCP and export stub, DCP and constraints
if self.vitis:
tcl.append(
"set_property SYNTH_CHECKPOINT_MODE Hierarchical [ get_files %s ]"
% bd_filename
)
tcl.append(
"set_property -name {STEPS.SYNTH_DESIGN.ARGS.MORE OPTIONS} "
"-value {-mode out_of_context} -objects [get_runs synth_1]"
)
num_workers = get_num_default_workers()
assert num_workers >= 0, "Number of workers must be nonnegative."
if num_workers == 0:
num_workers = mp.cpu_count()
tcl.append("launch_runs synth_1 -jobs %s" % str(num_workers))
tcl.append("wait_on_run [get_runs synth_1]")
tcl.append("open_run synth_1 -name synth_1")
tcl.append("write_verilog -force -mode synth_stub %s.v" % block_name)
tcl.append("write_checkpoint %s.dcp" % block_name)
tcl.append("write_xdc %s.xdc" % block_name)
tcl.append("report_utilization -file %s_partition_util.rpt" % block_name)
# export block design itself as an IP core
block_vendor = "xilinx_finn"
block_library = "finn"
block_vlnv = "%s:%s:%s:1.0" % (block_vendor, block_library, block_name)
model.set_metadata_prop("vivado_stitch_vlnv", block_vlnv)
model.set_metadata_prop("vivado_stitch_ifnames", str(self.intf_names))
tcl.append(
(
"ipx::package_project -root_dir %s/ip -vendor %s "
"-library %s -taxonomy /UserIP -module %s -import_files"
)
% (vivado_stitch_proj_dir, block_vendor, block_library, block_name)
)
tcl.append("set_property core_revision 2 [ipx::find_open_core %s]" % block_vlnv)
tcl.append("ipx::create_xgui_files [ipx::find_open_core %s]" % block_vlnv)
# if targeting Vitis, add some properties to the IP
if self.vitis:
tcl.append(
"ipx::remove_bus_parameter FREQ_HZ "
"[ipx::get_bus_interfaces CLK.AP_CLK -of_objects [ipx::current_core]]"
)
# replace source code with dcp
tcl.append(
"set_property sdx_kernel true [ipx::find_open_core %s]" % block_vlnv
)
tcl.append(
"set_property sdx_kernel_type rtl [ipx::find_open_core %s]" % block_vlnv
)
tcl.append(
"set_property supported_families { } [ipx::find_open_core %s]"
% block_vlnv
)
tcl.append(
"set_property xpm_libraries {XPM_CDC XPM_MEMORY XPM_FIFO} "
"[ipx::find_open_core %s]" % block_vlnv
)
tcl.append(
"set_property auto_family_support_level level_2 "
"[ipx::find_open_core %s]" % block_vlnv
)
# remove all files from synthesis and sim groups
# we'll replace with DCP, stub, and xdc
tcl.append(
"ipx::remove_all_file "
"[ipx::get_file_groups xilinx_anylanguagebehavioralsimulation]"
)
tcl.append(
"ipx::remove_all_file "
"[ipx::get_file_groups xilinx_anylanguagesynthesis]"
)
tcl.append(
"ipx::remove_file_group "
"xilinx_anylanguagebehavioralsimulation [ipx::current_core]"
)
tcl.append(
"ipx::remove_file_group "
"xilinx_anylanguagesynthesis [ipx::current_core]"
)
# remove sim and src folders
tcl.append("file delete -force %s/ip/sim" % vivado_stitch_proj_dir)
tcl.append("file delete -force %s/ip/src" % vivado_stitch_proj_dir)
# copy and add DCP, stub, and xdc
tcl.append("file mkdir %s/ip/dcp" % vivado_stitch_proj_dir)
tcl.append("file mkdir %s/ip/impl" % vivado_stitch_proj_dir)
tcl.append(
"file copy -force %s.dcp %s/ip/dcp"
% (block_name, vivado_stitch_proj_dir)
)
tcl.append(
"file copy -force %s.xdc %s/ip/impl"
% (block_name, vivado_stitch_proj_dir)
)
tcl.append("ipx::add_file_group xilinx_implementation [ipx::current_core]")
tcl.append(
"ipx::add_file impl/%s.xdc [ipx::get_file_groups xilinx_implementation]"
% block_name
)
tcl.append(
"set_property used_in [list implementation] "
"[ipx::get_files impl/%s.xdc "
"-of_objects [ipx::get_file_groups xilinx_implementation]]" % block_name
)
tcl.append(
"ipx::add_file_group " "xilinx_synthesischeckpoint [ipx::current_core]"
)
tcl.append(
"ipx::add_file dcp/%s.dcp "
"[ipx::get_file_groups xilinx_synthesischeckpoint]" % block_name
)
tcl.append(
"ipx::add_file_group xilinx_simulationcheckpoint [ipx::current_core]"
)
tcl.append(
"ipx::add_file dcp/%s.dcp "
"[ipx::get_file_groups xilinx_simulationcheckpoint]" % block_name
)
tcl.append("ipx::update_checksums [ipx::find_open_core %s]" % block_vlnv)
tcl.append("ipx::save_core [ipx::find_open_core %s]" % block_vlnv)
# export list of used Verilog files (for rtlsim later on)
tcl.append(
"set all_v_files [get_files -filter {FILE_TYPE == Verilog "
+ "&& USED_IN_SYNTHESIS == 1} ]"
)
v_file_list = "%s/all_verilog_srcs.txt" % vivado_stitch_proj_dir
tcl.append("set fp [open %s w]" % v_file_list)
# write each verilog filename to all_verilog_srcs.txt
tcl.append("foreach vf $all_v_files {puts $fp $vf}")
tcl.append("close $fp")
# write the project creator tcl script
tcl_string = "\n".join(tcl) + "\n"
with open(vivado_stitch_proj_dir + "/make_project.tcl", "w") as f:
f.write(tcl_string)
# create a shell script and call Vivado
make_project_sh = vivado_stitch_proj_dir + "/make_project.sh"
working_dir = os.environ["PWD"]
with open(make_project_sh, "w") as f:
f.write("#!/bin/bash \n")
f.write("cd {}\n".format(vivado_stitch_proj_dir))
f.write("vivado -mode batch -source make_project.tcl\n")
f.write("cd {}\n".format(working_dir))
bash_command = ["bash", make_project_sh]
process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
process_compile.communicate()
return (model, False)
| 46.39726 | 88 | 0.596398 |
b659fa2233ed9355ea26bb67ab128916a1f5f50a | 721 | py | Python | example.py | vikramgorla/python-opendata-transport | 4f84c244a4c4d9deb7a606cdb34dc09f6ee9eba9 | [
"MIT"
] | null | null | null | example.py | vikramgorla/python-opendata-transport | 4f84c244a4c4d9deb7a606cdb34dc09f6ee9eba9 | [
"MIT"
] | null | null | null | example.py | vikramgorla/python-opendata-transport | 4f84c244a4c4d9deb7a606cdb34dc09f6ee9eba9 | [
"MIT"
] | null | null | null | """
Copyright (c) 2015-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import asyncio
import aiohttp
from opendata_transport import OpendataTransport
async def main():
    """Fetch the next connections from Bex to Vevey and print them."""
    # BUG FIX: aiohttp.ClientSession only implements the *async* context
    # manager protocol; a plain ``with`` fails at runtime. Use ``async with``.
    async with aiohttp.ClientSession() as session:
        data = OpendataTransport('Bex', 'Vevey', loop, session)
        await data.async_get_data()
        # Print the start and the destination name
        print("Train connections:", data.from_name, "->", data.to_name)
        # Print the next three connections
        print(data.connections)
        # Print the details of the next connection
        print(data.connections[0])
# The module-level ``loop`` is also passed into OpendataTransport inside
# main(), so it must be created before main() runs.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 24.862069 | 72 | 0.699029 |
6e538c387b8f9ad1523f2c8957b1ca329fde917a | 1,406 | py | Python | instagram/urls.py | hkawinzi/The-_gram | 56b560f29e38f284a40c4c7c61df92943c9c0bad | [
"Unlicense"
] | null | null | null | instagram/urls.py | hkawinzi/The-_gram | 56b560f29e38f284a40c4c7c61df92943c9c0bad | [
"Unlicense"
] | 7 | 2021-03-19T02:20:13.000Z | 2022-02-10T09:28:24.000Z | instagram/urls.py | hkawinzi/The-_gram | 56b560f29e38f284a40c4c7c61df92943c9c0bad | [
"Unlicense"
] | null | null | null | """instagram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include, path
from django.views.generic.base import TemplateView

from accounts import views
urlpatterns = [
    path('admin/', admin.site.urls),
    # BUG FIX: the module imports ``from accounts import views`` but the
    # original referenced the undefined name ``accounts_views`` (and
    # ``auth_views`` was never imported), raising NameError at import time.
    path('register/', views.register, name='register'),
    path('profile/', views.profile, name='profile'),
    # NOTE(review): template dirs are spelled 'accountss/' -- looks like a
    # typo for 'accounts/', but kept since it must match the template tree.
    path('login/', auth_views.LoginView.as_view(template_name='accountss/login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='accountss/logout.html'), name='logout'),
    path('', include('accounts.urls')),
]
# Serve user-uploaded media through Django itself during development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 38 | 105 | 0.726174 |
521a4e369296844f4331bf62a0e6ce22d382733b | 1,596 | py | Python | learning/sources/source_unity_exporter.py | bermeom/quadruped-robot | 5570c720a27b26f94236ebc2ff41f0a1549f10b8 | [
"MIT"
] | 8 | 2018-12-19T17:30:10.000Z | 2021-05-09T17:53:03.000Z | learning/sources/source_unity_exporter.py | bermeom/quadruped-robot | 5570c720a27b26f94236ebc2ff41f0a1549f10b8 | [
"MIT"
] | null | null | null | learning/sources/source_unity_exporter.py | bermeom/quadruped-robot | 5570c720a27b26f94236ebc2ff41f0a1549f10b8 | [
"MIT"
] | 2 | 2020-10-06T01:56:30.000Z | 2021-04-28T18:31:39.000Z | import tensorflow as tf
import tensorblock as tb
from tensorflow.python.tools import freeze_graph
def export_ugraph( brain, model_path, env_name, target_nodes):
    """
    Unity ML Agents: export the latest saved model to .bytes format for
    Unity embedding.

    :param brain: tensorblock brain (NOTE(review): currently unused in the body)
    :param model_path: path of model checkpoints.
    :param env_name: Name of associated Learning Environment.
    :param target_nodes: Comma separated string of needed output nodes for embedded graph.

    Example: from sources.source_unity_exporter import *
             export_ugraph (self.brain, "./trained_models/unity_contcatch_player_DDPG/", "continuouscatcher", "NormalActor/Output/Tanh")
             raise SystemExit(0)
    On Unity: scope = NormalActor/
              action = /Output/Tanh
              observation = Observation/Placeholder
    """
    # Serialize the current default graph (a fresh Session picks it up) as a
    # binary GraphDef next to the checkpoints.
    tf.train.write_graph(tf.Session().graph_def, model_path, 'raw_graph_def.pb', as_text=False)
    # Latest checkpoint under model_path supplies the variable values.
    ckpt = tf.train.get_checkpoint_state(model_path)
    # Fold the checkpoint weights into the graph, keep only the subgraph
    # feeding target_nodes, and write the result as <env_name>.bytes.
    freeze_graph.freeze_graph(input_graph=model_path + '/raw_graph_def.pb',
                              input_binary=True,
                              input_checkpoint=ckpt.model_checkpoint_path,
                              output_node_names=target_nodes,
                              output_graph=model_path + '/' + env_name + '.bytes',
                              clear_devices=True, initializer_nodes="", input_saver="",
                              restore_op_name="save/restore_all", filename_tensor_name="save/Const:0")
| 53.2 | 147 | 0.639724 |
63ff8e6136b37c0ef5c4fc20c25e2df175fa9a24 | 12,594 | py | Python | salt/modules/libcloud_storage.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 12 | 2015-01-21T00:18:25.000Z | 2021-07-11T07:35:26.000Z | salt/modules/libcloud_storage.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | salt/modules/libcloud_storage.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 12 | 2015-01-05T09:50:42.000Z | 2019-08-19T01:43:40.000Z | # -*- coding: utf-8 -*-
'''
Apache Libcloud Storage Management
==================================
Connection module for Apache Libcloud Storage (object/blob) management.
For a full list of supported clouds, see http://libcloud.readthedocs.io/en/latest/storage/supported_providers.html
Clouds include Amazon S3, Google Storage, Aliyun, Azure Blobs, Ceph, OpenStack swift
.. versionadded:: 2018.3.0
:configuration:
This module uses a configuration profile for one or multiple Storage providers
.. code-block:: yaml
libcloud_storage:
profile_test1:
driver: google_storage
key: GOOG0123456789ABCXYZ
secret: mysecret
profile_test2:
driver: s3
key: 12345
secret: mysecret
:depends: apache-libcloud
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
# Import salt libs
import salt.utils.args
import salt.utils.compat
from salt.utils.versions import LooseVersion as _LooseVersion
log = logging.getLogger(__name__)
# Import third party libs
REQUIRED_LIBCLOUD_VERSION = '1.5.0'
try:
#pylint: disable=unused-import
import libcloud
from libcloud.storage.providers import get_driver
#pylint: enable=unused-import
if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION):
raise ImportError()
logging.getLogger('libcloud').setLevel(logging.CRITICAL)
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def __virtual__():
    '''
    Only load this module if a suitable apache-libcloud is importable.
    '''
    if HAS_LIBCLOUD:
        return True
    return (
        False,
        ('A apache-libcloud library with version at least {0} was not '
         'found').format(REQUIRED_LIBCLOUD_VERSION),
    )
def __init__(opts):
    '''
    Salt loader init hook; ``opts`` is the minion configuration (unused here).
    NOTE(review): presumably packs the loader dunders (e.g. ``__salt__``)
    so helper code can access them -- confirm against salt.utils.compat.
    '''
    salt.utils.compat.pack_dunder(__name__)
def _get_driver(profile):
    '''
    Instantiate a libcloud storage driver from the ``libcloud_storage``
    configuration profile named ``profile``.
    '''
    profile_config = __salt__['config.option']('libcloud_storage')[profile]
    driver_cls = get_driver(profile_config['driver'])
    # Forward all profile keys except 'driver', normalizing the common ones.
    kwargs = dict(profile_config)
    kwargs.pop('driver')
    kwargs['key'] = profile_config.get('key')
    kwargs['secret'] = profile_config.get('secret', None)
    kwargs['secure'] = profile_config.get('secure', True)
    kwargs['host'] = profile_config.get('host', None)
    kwargs['port'] = profile_config.get('port', None)
    return driver_cls(**kwargs)
def list_containers(profile, **libcloud_kwargs):
    '''
    Return the containers available on the given profile.

    :param profile: The profile key
    :type profile: ``str``
    :param libcloud_kwargs: Extra arguments for the driver's list_containers method
    :type libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.list_containers profile1
    '''
    conn = _get_driver(profile=profile)
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    return [
        {'name': container.name, 'extra': container.extra}
        for container in conn.list_containers(**kwargs)
    ]
def list_container_objects(container_name, profile, **libcloud_kwargs):
    '''
    List the objects (e.g. files) stored in ``container_name`` on the
    given profile.

    :param container_name: Container name
    :type container_name: ``str``
    :param profile: The profile key
    :type profile: ``str``
    :param libcloud_kwargs: Extra arguments for the driver's list_container_objects method
    :type libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.list_container_objects MyFolder profile1
    '''
    conn = _get_driver(profile=profile)
    container = conn.get_container(container_name)
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    return [
        {
            'name': obj.name,
            'size': obj.size,
            'hash': obj.hash,
            'container': obj.container.name,
            'extra': obj.extra,
            'meta_data': obj.meta_data,
        }
        for obj in conn.list_container_objects(container, **kwargs)
    ]
def create_container(container_name, profile, **libcloud_kwargs):
    '''
    Create a new container in the cloud and return its name/extra data.

    :param container_name: Container name
    :type container_name: ``str``
    :param profile: The profile key
    :type profile: ``str``
    :param libcloud_kwargs: Extra arguments for the driver's create_container method
    :type libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.create_container MyFolder profile1
    '''
    conn = _get_driver(profile=profile)
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    new_container = conn.create_container(container_name, **kwargs)
    return {'name': new_container.name, 'extra': new_container.extra}
def get_container(container_name, profile, **libcloud_kwargs):
    '''
    Fetch details for the container named ``container_name``.

    :param container_name: Container name
    :type container_name: ``str``
    :param profile: The profile key
    :type profile: ``str``
    :param libcloud_kwargs: Extra arguments for the driver's get_container method
    :type libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.get_container MyFolder profile1
    '''
    conn = _get_driver(profile=profile)
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    found = conn.get_container(container_name, **kwargs)
    return {'name': found.name, 'extra': found.extra}
def get_container_object(container_name, object_name, profile, **libcloud_kwargs):
    '''
    Fetch details for one object (file/blob) inside a container.

    :param container_name: Container name
    :type container_name: ``str``
    :param object_name: Object name
    :type object_name: ``str``
    :param profile: The profile key
    :type profile: ``str``
    :param libcloud_kwargs: Extra arguments for the driver's get_container_object method
    :type libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.get_container_object MyFolder MyFile.xyz profile1
    '''
    conn = _get_driver(profile=profile)
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    obj = conn.get_container_object(container_name, object_name, **kwargs)
    return {
        'name': obj.name,
        'size': obj.size,
        'hash': obj.hash,
        'container': obj.container.name,
        'extra': obj.extra,
        'meta_data': obj.meta_data,
    }
def download_object(container_name, object_name, destination_path, profile,
                    overwrite_existing=False, delete_on_failure=True, **libcloud_kwargs):
    '''
    Download an object to the specified destination path.

    :param container_name: Container name
    :type container_name: ``str``
    :param object_name: Object name
    :type object_name: ``str``
    :param destination_path: Full path to a file or a directory where the
        incoming file will be saved.
    :type destination_path: ``str``
    :param profile: The profile key
    :type profile: ``str``
    :param overwrite_existing: True to overwrite an existing file,
        defaults to False.
    :type overwrite_existing: ``bool``
    :param delete_on_failure: True to delete a partially downloaded file if
        the download was not successful (hash mismatch / file size).
    :type delete_on_failure: ``bool``
    :param libcloud_kwargs: Extra arguments for the driver's download_object method
    :type libcloud_kwargs: ``dict``

    :return: True if an object has been successfully downloaded, False
        otherwise.
    :rtype: ``bool``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.download_object MyFolder me.jpg /tmp/me.jpg profile1
    '''
    conn = _get_driver(profile=profile)
    obj = conn.get_object(container_name, object_name)
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    return conn.download_object(
        obj, destination_path, overwrite_existing, delete_on_failure, **kwargs
    )
def upload_object(file_path, container_name, object_name, profile, extra=None,
                  verify_hash=True, headers=None, **libcloud_kwargs):
    '''
    Upload an object currently located on a disk.

    :param file_path: Path to the object on disk.
    :type file_path: ``str``
    :param container_name: Destination container.
    :type container_name: ``str``
    :param object_name: Object name.
    :type object_name: ``str``
    :param profile: The profile key
    :type profile: ``str``
    :param extra: Extra attributes (driver specific). (optional)
    :type extra: ``dict``
    :param verify_hash: Verify hash
    :type verify_hash: ``bool``
    :param headers: (optional) Additional request headers, such as CORS
        headers. For example:
        headers = {'Access-Control-Allow-Origin': 'http://mozilla.com'}
    :type headers: ``dict``
    :param libcloud_kwargs: Extra arguments for the driver's upload_object method
    :type libcloud_kwargs: ``dict``

    :return: The object name in the cloud
    :rtype: ``str``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.upload_object /file/to/me.jpg MyFolder me.jpg profile1
    '''
    conn = _get_driver(profile=profile)
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    destination = conn.get_container(container_name)
    uploaded = conn.upload_object(
        file_path, destination, object_name, extra, verify_hash, headers, **kwargs
    )
    return uploaded.name
def delete_object(container_name, object_name, profile, **libcloud_kwargs):
    '''
    Delete an object in the cloud.
    :param container_name: Container name
    :type container_name: ``str``
    :param object_name: Object name
    :type object_name: ``str``
    :param profile: The profile key
    :type profile: ``str``
    :param libcloud_kwargs: Extra arguments passed to the driver's get_object
        method. NOTE(review): unlike the sibling functions, these are NOT
        forwarded to delete_object itself (see the calls below); the previous
        docstring claimed otherwise.
    :type libcloud_kwargs: ``dict``
    :return: True if an object has been successfully deleted, False
    otherwise.
    :rtype: ``bool``
    CLI Example:
    .. code-block:: bash
        salt myminion libcloud_storage.delete_object MyFolder me.jpg profile1
    '''
    conn = _get_driver(profile=profile)
    libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    # libcloud_kwargs are consumed by get_object, not delete_object.
    obj = conn.get_object(container_name, object_name, **libcloud_kwargs)
    return conn.delete_object(obj)
def delete_container(container_name, profile, **libcloud_kwargs):
    '''
    Delete an object container in the cloud.

    :param container_name: Container name
    :type container_name: ``str``
    :param profile: The profile key
    :type profile: ``str``
    :param libcloud_kwargs: Extra arguments for the driver's delete_container method
    :type libcloud_kwargs: ``dict``

    :return: True if an object container has been successfully deleted, False
        otherwise.
    :rtype: ``bool``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.delete_container MyFolder profile1
    '''
    conn = _get_driver(profile=profile)
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    target = conn.get_container(container_name)
    return conn.delete_container(target, **kwargs)
def extra(method, profile, **libcloud_kwargs):
    '''
    Call an arbitrary extended method on the driver by name.

    :param method: Driver's method name
    :type method: ``str``
    :param profile: The profile key
    :type profile: ``str``
    :param libcloud_kwargs: Extra arguments passed through to the driver method
    :type libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml
    '''
    kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    conn = _get_driver(profile=profile)
    return getattr(conn, method)(**kwargs)
| 29.914489 | 128 | 0.678577 |
f6cda114582c51c716509fc9ed139ab7257b11ed | 3,133 | py | Python | audb/core/utils.py | audeering/audb | 6174d8c03676dd3a868572393c3cb8c295da6f04 | [
"MIT"
] | 1 | 2022-03-17T10:36:23.000Z | 2022-03-17T10:36:23.000Z | audb/core/utils.py | audeering/audb | 6174d8c03676dd3a868572393c3cb8c295da6f04 | [
"MIT"
] | 143 | 2021-04-07T13:00:00.000Z | 2022-03-29T08:53:59.000Z | audb/core/utils.py | audeering/audb | 6174d8c03676dd3a868572393c3cb8c295da6f04 | [
"MIT"
] | null | null | null | import typing
import warnings
import audbackend
from audb.core.config import config
from audb.core.repository import Repository
def lookup_backend(
name: str,
version: str,
) -> audbackend.Backend:
r"""Return backend of requested database.
If the database is stored in several repositories,
only the first one is considered.
The order of the repositories to look for the database
is given by :attr:`config.REPOSITORIES`.
Args:
name: database name
version: version string
Returns:
backend
Raises:
RuntimeError: if database is not found
"""
return _lookup(name, version)[1]
def repository(
        name: str,
        version: str,
) -> Repository:
    r"""Return the repository that stores the requested database.

    Repositories are searched in the order given by
    :attr:`config.REPOSITORIES`; if the database is stored in several
    repositories, the first match is returned.

    Args:
        name: database name
        version: version string

    Returns:
        repository that contains the database

    Raises:
        RuntimeError: if database is not found

    """
    repo, _ = _lookup(name, version)
    return repo
def mix_mapping(
        mix: str,
        warn: bool = True,
) -> typing.Tuple[typing.Optional[typing.List[int]], bool]:
    r"""Map the deprecated ``mix`` argument to ``channels``/``mixdown``.

    Args:
        mix: old mix argument from audb,
            can be ``'mono'``, ``'stereo'``, ``'left'``, ``'right'``
        warn: if ``True`` it shows a deprecation warning

    Returns:
        channels and mixdown arguments

    """
    if warn:
        warnings.warn(
            "Argument 'mix' is deprecated "
            "and will be removed with version '1.2.0'. "
            "Use 'channels' and 'mixdown' instead.",
            category=UserWarning,
            stacklevel=2,
        )
    # Early returns instead of an if/elif chain; fresh lists on every call.
    if mix == 'mono':
        return None, True
    if mix in ('stereo', 'left', 'right'):
        channel_map = {'stereo': [0, 1], 'left': [0], 'right': [1]}
        return channel_map[mix], False
    if mix is None:
        return None, False
    raise ValueError(
        f"Using deprecated argument 'mix' with value '{mix}' "
        "is no longer supported."
    )
def _lookup(
        name: str,
        version: str,
) -> typing.Tuple[Repository, audbackend.Backend]:
    r"""Find the first repository (and its backend) hosting the database.

    Iterates :attr:`config.REPOSITORIES` in order and returns the first
    repository whose backend contains a ``db.yaml`` header for the
    requested name and version.

    """
    for repo in config.REPOSITORIES:
        backend = audbackend.create(
            repo.backend,
            repo.host,
            repo.name,
        )
        if backend.exists(backend.join(name, 'db.yaml'), version):
            return repo, backend
    raise RuntimeError(
        f"Cannot find version {version} for database '{name}'."
    )
| 23.380597 | 68 | 0.591446 |
1cbe8d2cb09bfea4067aa2f0f02d6fd521f73f69 | 972 | py | Python | main.py | nan-dre/FFTNR | a66569fa11b0ee81345f5bffe8167cc5ae41a7fa | [
"MIT"
] | null | null | null | main.py | nan-dre/FFTNR | a66569fa11b0ee81345f5bffe8167cc5ae41a7fa | [
"MIT"
] | null | null | null | main.py | nan-dre/FFTNR | a66569fa11b0ee81345f5bffe8167cc5ae41a7fa | [
"MIT"
] | null | null | null | import scipy
import numpy as np
import librosa
from librosa import display
from matplotlib import pyplot as plt
import pprint
# Load the whole clip as mono at its native sample rate
# (sr=None disables librosa's default 22050 Hz resampling).
file_path = "sounds/a_tired_ghost.wav"
samples, sampling_rate = librosa.load(file_path, sr = None, mono = True,
                                      offset = 0.0, duration = None)
def plot_wave():
    # Plot the waveform (amplitude over time) of the module-level `samples`;
    # blocks until the figure window is closed.
    plt.figure()
    librosa.display.waveplot( y = samples, sr = sampling_rate )
    plt.xlabel("Time")
    # NOTE(review): axis label contains a typo ("Amplitutde") — it is a
    # runtime string, so it is left untouched here.
    plt.ylabel("Amplitutde")
    plt.show()
def fft_plot(sampling_rate, samples):
    """Plot the single-sided amplitude spectrum of `samples`.

    Args:
        sampling_rate: sample rate in Hz; scales the frequency axis.
        samples: 1-D sequence/array of audio samples.

    Returns:
        The result of ``plt.show()`` (``None``); blocks until the
        figure window is closed.

    Raises:
        ValueError: if `samples` is empty.
    """
    n = len(samples)
    if n == 0:
        # Original code would die with ZeroDivisionError below.
        raise ValueError("samples must not be empty")
    T = 1/sampling_rate  # sample period in seconds
    # BUG FIX: since SciPy 1.5 `scipy.fft` is a module, not a callable,
    # so `scipy.fft(samples)` raises TypeError.  NumPy's FFT computes
    # the identical transform.
    yf = np.fft.fft(samples)
    # Frequencies of the first n//2 (non-negative) bins, up to Nyquist.
    xf = np.linspace(0.0, 1.0/(2.0*T), n//2)
    fig, ax = plt.subplots()
    # The 2/n factor converts raw FFT magnitude to single-sided amplitude.
    ax.plot(xf, 2.0/n * np.abs(yf[:n//2]))
    plt.grid()
    return plt.show()
if __name__ == "__main__":
    # Report clip duration (seconds) and total sample count, then show
    # the waveform plot.  The FFT plot is left disabled below.
    duration = len(samples) / sampling_rate
    print(duration)
    print(len(samples))
    # for i in range(0,100):
    # print(samples[i])
    plot_wave()
    # fft_plot(sampling_rate, samples)
0952344cd260252e19e99c938a02bb8c59f94368 | 3,284 | gyp | Python | cloud_print/gcp20/prototype/gcp20_device.gyp | nagineni/chromium-crosswalk | 5725642f1c67d0f97e8613ec1c3e8107ab53fdf8 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4 | 2017-04-05T01:51:34.000Z | 2018-02-15T03:11:54.000Z | cloud_print/gcp20/prototype/gcp20_device.gyp | nagineni/chromium-crosswalk | 5725642f1c67d0f97e8613ec1c3e8107ab53fdf8 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-12-13T19:44:12.000Z | 2021-12-13T19:44:12.000Z | cloud_print/gcp20/prototype/gcp20_device.gyp | nagineni/chromium-crosswalk | 5725642f1c67d0f97e8613ec1c3e8107ab53fdf8 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4 | 2017-04-05T01:52:03.000Z | 2022-02-13T17:58:45.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  # Settings inherited by every target in this file.
  'target_defaults': {
    'variables': {
      'chromium_code': 1,
      'enable_wexit_time_destructors': 1,
    },
    'include_dirs': [
      '<(DEPTH)',
      # To allow including "version.h"
      '<(SHARED_INTERMEDIATE_DIR)',
    ],
  },
  'targets': [
    # Core library with the GCP 2.0 prototype device logic; linked into
    # both the executable and the unit-test binary below.
    {
      'target_name': 'gcp20_device_lib',
      'type': 'static_library',
      'dependencies': [
        '<(DEPTH)/base/base.gyp:base',
        '<(DEPTH)/base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
        '<(DEPTH)/google_apis/google_apis.gyp:google_apis',
        '<(DEPTH)/jingle/jingle.gyp:notifier',
        '<(DEPTH)/net/net.gyp:http_server',
        '<(DEPTH)/net/net.gyp:net',
        '<(DEPTH)/url/url.gyp:url_lib',
      ],
      'sources': [
        'cloud_print_response_parser.cc',
        'cloud_print_response_parser.h',
        'cloud_print_request.cc',
        'cloud_print_request.h',
        'cloud_print_requester.cc',
        'cloud_print_requester.h',
        'cloud_print_url_request_context_getter.cc',
        'cloud_print_url_request_context_getter.h',
        'cloud_print_xmpp_listener.cc',
        'cloud_print_xmpp_listener.h',
        'conio_posix.cc',
        'conio_posix.h',
        'command_line_reader.cc',
        'command_line_reader.h',
        'dns_packet_parser.cc',
        'dns_packet_parser.h',
        'dns_response_builder.cc',
        'dns_response_builder.h',
        'dns_sd_server.cc',
        'dns_sd_server.h',
        'local_settings.h',
        'local_print_job.cc',
        'local_print_job.h',
        'print_job_handler.cc',
        'print_job_handler.h',
        'printer_state.cc',
        'printer_state.h',
        'printer.cc',
        'printer.h',
        'privet_http_server.cc',
        'privet_http_server.h',
        'service_parameters.cc',
        'service_parameters.h',
        'special_io.h',
        'x_privet_token.cc',
        'x_privet_token.h',
      ],
    },
    # Main device executable.
    {
      'target_name': 'gcp20_device',
      'type': 'executable',
      'dependencies': [
        'gcp20_device_lib',
      ],
      'sources': [
        'gcp20_device.cc',
      ],
      'msvs_settings': {
        'VCLinkerTool': {
          'SubSystem': '1', # Set /SUBSYSTEM:CONSOLE
          'AdditionalDependencies': [
            # TODO(maksymb): Check which of these libs is needed.
            'secur32.lib',
            'httpapi.lib',
            'Ws2_32.lib',
          ],
        },
      },
    },
    # Unit tests for the library.
    {
      'target_name': 'gcp20_device_unittests',
      'type': 'executable',
      'sources': [
        'printer_unittest.cc',
        'x_privet_token_unittest.cc',
      ],
      'dependencies': [
        'gcp20_device_lib',
        '<(DEPTH)/base/base.gyp:run_all_unittests',
        '<(DEPTH)/base/base.gyp:test_support_base',
        '<(DEPTH)/testing/gmock.gyp:gmock',
        '<(DEPTH)/testing/gtest.gyp:gtest',
      ],
      'msvs_settings': {
        'VCLinkerTool': {
          'SubSystem': '1', # Set /SUBSYSTEM:CONSOLE
          'AdditionalDependencies': [
            'secur32.lib',
          ],
        },
      },
    },
  ],
}
| 28.556522 | 100 | 0.553593 |
495a4b494bc98bdbbea89c503e7f2b807014ea68 | 3,645 | py | Python | examples/server/nss_http_server.py | mshang816/nss_http | 23c7d53e9617fbd49fc32f6168ff7a9f94086735 | [
"MIT"
] | 34 | 2015-01-29T14:41:00.000Z | 2021-07-13T15:02:11.000Z | examples/server/nss_http_server.py | mshang816/nss_http | 23c7d53e9617fbd49fc32f6168ff7a9f94086735 | [
"MIT"
] | 2 | 2016-05-20T05:47:47.000Z | 2018-07-13T21:43:46.000Z | examples/server/nss_http_server.py | mshang816/nss_http | 23c7d53e9617fbd49fc32f6168ff7a9f94086735 | [
"MIT"
] | 19 | 2016-02-29T13:20:45.000Z | 2021-11-18T11:23:13.000Z | #!/usr/bin/env python
import json
from flask import Flask, request, abort, Response
app = Flask(__name__)
@app.route('/passwd')
def passwd():
    """Serve NSS passwd entries as JSON.

    Query parameters:
        name: return the single entry with matching ``pw_name``
              (404 if absent).
        uid:  return the single entry with matching ``pw_uid``
              (404 if absent, 400 if not an integer).

    Without parameters the full list of entries is returned.
    """
    data = [
        {
            "pw_name": "testuser1", "pw_passwd": "x", "pw_uid": 6000, "pw_gid": 6000,
            "pw_gecos": "Testing", "pw_dir": "/home/testuser1", "pw_shell": "/bin/bash",
        },
        {
            "pw_name": "testuser2", "pw_passwd": "x", "pw_uid": 6001, "pw_gid": 6000,
            "pw_gecos": None, "pw_dir": "/home/testuser2", "pw_shell": "/bin/bash",
        },
        {
            "pw_name": "testuser3", "pw_passwd": "x", "pw_uid": 6002, "pw_gid": 6001,
            "pw_gecos": None, "pw_dir": "/home/testuser3", "pw_shell": "/bin/bash",
        },
        {
            "pw_name": "testuser4", "pw_passwd": "x", "pw_uid": 6003, "pw_gid": 6001,
            "pw_gecos": None, "pw_dir": "/home/testuser4", "pw_shell": "/bin/bash",
        },
    ]
    name = request.args.get("name")
    if name:
        for struct in data:
            if name == struct["pw_name"]:
                return Response(json.dumps(struct), mimetype='application/json')
        abort(404)
    uid = request.args.get("uid")
    if uid:
        # BUG FIX: int() on a non-numeric query value used to raise an
        # unhandled ValueError (HTTP 500); reject it explicitly instead.
        try:
            uid = int(uid)
        except ValueError:
            abort(400)
        for struct in data:
            if uid == struct["pw_uid"]:
                return Response(json.dumps(struct), mimetype='application/json')
        abort(404)
    return Response(json.dumps(data), mimetype='application/json')
@app.route('/group')
def group():
    """Serve NSS group entries as JSON.

    Query parameters:
        name: return the entry with matching ``gr_name`` (404 if absent).
        gid:  return the entry with matching ``gr_gid``
              (404 if absent, 400 if not an integer).

    Without parameters the full list of entries is returned.
    """
    data = [
        { "gr_name": "testgroup1", "gr_passwd": "x", "gr_gid": 6000, "gr_mem": ["testuser1", "testuser2"], },
        { "gr_name": "testgroup2", "gr_passwd": "x", "gr_gid": 6001, "gr_mem": ["testuser3", "testuser4"], },
    ]
    name = request.args.get("name")
    if name:
        for struct in data:
            if name == struct["gr_name"]:
                return Response(json.dumps(struct), mimetype='application/json')
        abort(404)
    gid = request.args.get("gid")
    if gid:
        # BUG FIX: reject a non-numeric gid with 400 instead of crashing
        # with an unhandled ValueError (HTTP 500); matches /passwd.
        try:
            gid = int(gid)
        except ValueError:
            abort(400)
        for struct in data:
            if gid == struct["gr_gid"]:
                return Response(json.dumps(struct), mimetype='application/json')
        abort(404)
    return Response(json.dumps(data), mimetype='application/json')
@app.route('/shadow')
def shadow():
    """Serve NSS shadow entries as JSON.

    ``?name=<user>`` selects the single entry with matching ``sp_namp``
    (404 if absent); without parameters the full list is returned.
    """
    data = [
        {
            "sp_namp": "testuser1", "sp_pwdp": "$1$BXZIu72k$S7oxt9hBiBl/O3Rm3H4Q30", "sp_lstchg": 16034,
            "sp_min": 0, "sp_max": 99999, "sp_warn": 7, "sp_inact": None, "sp_expire": None, "sp_flag": None,
        },
        {
            "sp_namp": "testuser2", "sp_pwdp": "$1$BXZIu72k$S7oxt9hBiBl/O3Rm3H4Q30", "sp_lstchg": 16034,
            "sp_min": 0, "sp_max": 99999, "sp_warn": 7, "sp_inact": None, "sp_expire": None, "sp_flag": None,
        },
        {
            "sp_namp": "testuser3", "sp_pwdp": "$1$BXZIu72k$S7oxt9hBiBl/O3Rm3H4Q30", "sp_lstchg": 16034,
            "sp_min": 0, "sp_max": 99999, "sp_warn": 7, "sp_inact": None, "sp_expire": None, "sp_flag": None,
        },
        {
            "sp_namp": "testuser4", "sp_pwdp": "$1$BXZIu72k$S7oxt9hBiBl/O3Rm3H4Q30", "sp_lstchg": 16034,
            "sp_min": 0, "sp_max": 99999, "sp_warn": 7, "sp_inact": 10, "sp_expire": 50, "sp_flag": None,
        },
    ]
    name = request.args.get("name")
    if name:
        entry = next(
            (record for record in data if record["sp_namp"] == name), None)
        if entry is None:
            abort(404)
        return Response(json.dumps(entry), mimetype='application/json')
    return Response(json.dumps(data), mimetype='application/json')
if __name__ == "__main__":
    # Development server only — debug mode must never be enabled in
    # production.
    app.debug = True
    app.run(host="localhost", port=9669)
| 34.065421 | 109 | 0.545953 |
f0b51b96094818e7fb467dded1159ec891c45b35 | 4,571 | py | Python | nova/tests/api/openstack/compute/contrib/test_flavor_manage.py | bopopescu/extra-specs-1 | 6a14d8d7807727023b4d589af47e8a9605f12db1 | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/contrib/test_flavor_manage.py | bopopescu/extra-specs-1 | 6a14d8d7807727023b4d589af47e8a9605f12db1 | [
"Apache-2.0"
] | 1 | 2020-07-24T14:14:13.000Z | 2020-07-24T14:14:13.000Z | nova/tests/api/openstack/compute/contrib/test_flavor_manage.py | bopopescu/extra-specs-1 | 6a14d8d7807727023b4d589af47e8a9605f12db1 | [
"Apache-2.0"
] | 1 | 2020-07-24T10:40:59.000Z | 2020-07-24T10:40:59.000Z | # Copyright 2011 Andrew Bogott for the Wikimedia Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import webob
from nova.api.openstack.compute.contrib import flavormanage
from nova.compute import instance_types
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
def fake_get_instance_type_by_flavor_id(flavorid):
    """Stub for instance_types.get_instance_type_by_flavor_id.

    Raises NotFound for the sentinel id 'failtest', rejects anything
    but '1234', and otherwise returns a canned flavor record that
    echoes the requested flavorid.
    """
    if flavorid == 'failtest':
        raise exception.NotFound("Not found sucka!")
    if not str(flavorid) == '1234':
        raise Exception("This test expects flavorid 1234, not %s" % flavorid)
    flavor = {
        'id': 7,
        'name': u'frob',
        'flavorid': flavorid,
        'memory_mb': 256,
        'vcpus': 1,
        'vcpu_weight': None,
        'root_gb': 1,
        'ephemeral_gb': 1,
        'swap': 0,
        'rxtx_factor': 1.0,
        'extra_specs': {},
        'deleted': False,
        'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329),
        'updated_at': None,
        'deleted_at': None,
    }
    return flavor
def fake_destroy(flavorname):
    # Stub for instance_types.destroy: accept any flavor name, do nothing.
    pass
def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
                flavorid, swap, rxtx_factor):
    """Stub for instance_types.create.

    Starts from the canned flavor for `flavorid` and overlays the
    caller-supplied values, coercing numeric fields like the real API.
    """
    newflavor = fake_get_instance_type_by_flavor_id(flavorid)
    newflavor.update({
        "name": name,
        "memory_mb": int(memory_mb),
        "vcpus": int(vcpus),
        "root_gb": int(root_gb),
        "ephemeral_gb": int(ephemeral_gb),
        "swap": swap,
        "rxtx_factor": float(rxtx_factor),
    })
    return newflavor
class FlavorManageTest(test.TestCase):
    """Tests for the flavor-manage API extension.

    instance_types is stubbed out with the fake_* helpers above, so no
    database access happens.
    """
    def setUp(self):
        super(FlavorManageTest, self).setUp()
        # Replace the real instance_types API with the in-memory fakes.
        self.stubs.Set(instance_types,
                       "get_instance_type_by_flavor_id",
                       fake_get_instance_type_by_flavor_id)
        self.stubs.Set(instance_types, "destroy", fake_destroy)
        self.stubs.Set(instance_types, "create", fake_create)
        self.controller = flavormanage.FlavorManageController()
    def test_delete(self):
        # Deleting a known flavor id is accepted (202); the stub's
        # 'failtest' sentinel makes the lookup raise NotFound.
        req = fakes.HTTPRequest.blank('/v2/123/flavors/1234')
        res = self.controller._delete(req, 1234)
        self.assertEqual(res.status_int, 202)
        # subsequent delete should fail
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._delete, req, "failtest")
    def test_create(self):
        # POST body doubles as the expected response body.
        expected = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": 1234,
                "swap": 512,
                "rxtx_factor": 1,
            }
        }
        url = '/v2/fake/flavors'
        req = webob.Request.blank(url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = json.dumps(expected)
        res = req.get_response(fakes.wsgi_app())
        body = json.loads(res.body)
        # Every submitted field must be echoed back unchanged.
        for key in expected["flavor"]:
            self.assertEquals(body["flavor"][key], expected["flavor"][key])
    def test_instance_type_exists_exception_returns_409(self):
        expected = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": 1235,
                "swap": 512,
                "rxtx_factor": 1,
            }
        }
        # Local stub that simulates a duplicate flavor on create.
        def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
                        flavorid, swap, rxtx_factor):
            raise exception.InstanceTypeExists()
        self.stubs.Set(instance_types, "create", fake_create)
        url = '/v2/fake/flavors'
        req = webob.Request.blank(url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = json.dumps(expected)
        res = req.get_response(fakes.wsgi_app())
        # Duplicate flavors must map to HTTP 409 Conflict.
        self.assertEqual(res.status_int, 409)
| 32.190141 | 78 | 0.596368 |
e0be5382e22958080807db0b7f4d8a039acecb8b | 19,990 | py | Python | iotbx/regression/ncs/tst_ncs.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z | iotbx/regression/ncs/tst_ncs.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | iotbx/regression/ncs/tst_ncs.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-03-26T12:52:30.000Z | 2021-03-26T12:52:30.000Z | from __future__ import absolute_import, division, print_function
import iotbx.ncs
from libtbx.test_utils import approx_equal, show_diff
from scitbx import matrix
import iotbx.ncs as ncs
from iotbx import pdb
import mmtbx.model
pdb_str_1="""\
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 0.496590 -0.643597 0.582393 0.00000
MTRIX2 2 0.867925 0.376088 -0.324443 0.00000
MTRIX3 2 -0.010221 0.666588 0.745356 0.00000
MTRIX1 3 -0.317946 -0.173437 0.932111 0.00000
MTRIX2 3 0.760735 -0.633422 0.141629 0.00000
MTRIX3 3 0.565855 0.754120 0.333333 0.00000
ATOM 1 N THR A 1 9.670 10.289 11.135 1.00 20.00 N
ATOM 2 CA THR A 2 9.559 8.931 10.615 1.00 20.00 C
ATOM 3 C THR A 3 9.634 7.903 11.739 1.00 20.00 C
ATOM 4 O THR B 4 10.449 8.027 12.653 1.00 20.00 O
ATOM 5 CB THR B 5 10.660 8.630 9.582 1.00 20.00 C
ATOM 6 OG1 THR A 6 10.560 9.552 8.490 1.00 20.00 O
ATOM 7 CG2 THR A 7 10.523 7.209 9.055 1.00 20.00 C
TER
"""
pdb_str_3="""
REMARK 0 Test molecule with BIOMOLECULE: 1
REMARK 0
REMARK 0 The test will generate the biomolecule (the multimer assembly)
REMARK 0 from the transformation matrices writen below
REMARK 0 and then compare the results to the calculated expected one
REMARK 350 CRYSTALLOGRAPHIC OPERATIONS ARE GIVEN.
REMARK 350 BIOMT1 1 1.000000 0.000000 0.000000 0.000000
REMARK 350 BIOMT2 1 0.000000 1.000000 0.000000 0.000000
REMARK 350 BIOMT3 1 0.000000 0.000000 1.000000 0.000000
REMARK 350 BIOMT1 2 1.000000 0.000000 0.000000 0.000000
REMARK 350 BIOMT2 2 0.000000 0.000000 -1.000000 0.000000
REMARK 350 BIOMT3 2 0.000000 1.000000 0.000000 0.000000
REMARK 350 BIOMT1 3 0.000000 0.000000 1.000000 0.000000
REMARK 350 BIOMT2 3 0.000000 1.000000 0.000000 0.000000
REMARK 350 BIOMT3 3 -1.000000 0.000000 0.000000 0.000000
REMARK 350 BIOMT1 4 0.000000 -1.000000 0.000000 0.000000
REMARK 350 BIOMT2 4 1.000000 0.000000 0.000000 0.000000
REMARK 350 BIOMT3 4 0.000000 0.000000 1.000000 0.000000
REMARK 350 BIOMT1 5 0.000000 0.000000 1.000000 0.000000
REMARK 350 BIOMT2 5 0.000000 1.000000 0.000000 0.000000
REMARK 350 BIOMT3 5 -1.000000 0.000000 0.000000 0.000000
REMARK 350 BIOMT1 6 0.000000 -1.000000 0.000000 0.000000
REMARK 350 BIOMT2 6 0.000000 0.000000 1.000000 0.000000
REMARK 350 BIOMT3 6 -1.000000 0.000000 0.000000 0.000000
REMARK 350 BIOMT1 7 0.500000 -0.866025 0.000000 0.000000
REMARK 350 BIOMT2 7 0.866025 0.500000 0.000000 0.000000
REMARK 350 BIOMT3 7 0.000000 0.000000 1.000000 0.000000
REMARK 350 BIOMT1 8 -0.500000 -0.866025 0.000000 0.000000
REMARK 350 BIOMT2 8 0.866025 -0.500000 0.000000 0.000000
REMARK 350 BIOMT3 8 0.000000 0.000000 1.000000 0.000000
REMARK 350 BIOMT1 9 1.000000 0.000000 0.000000 0.000000
REMARK 350 BIOMT2 9 0.000000 1.000000 0.000000 0.500000
REMARK 350 BIOMT3 9 0.000000 0.000000 1.000000 0.000000
REMARK 350 BIOMT1 10 -0.500000 -0.866025 0.000000 0.000000
REMARK 350 BIOMT2 10 0.866025 -0.500000 0.000000 0.000000
REMARK 350 BIOMT3 10 0.000000 0.000000 1.000000 -1.000000
MTRIX1 1 1.000000 0.000000 0.000000 0.00000
MTRIX2 1 0.000000 1.000000 0.000000 0.00000
MTRIX3 1 0.000000 0.000000 1.000000 0.00000
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 1.000000 0.000000 0.000000 0.00000
MTRIX2 2 0.000000 0.000000 -1.000000 0.00000
MTRIX3 2 0.000000 1.000000 0.000000 0.00000
MTRIX1 3 0.500000 -0.866025 0.000000 0.00000
MTRIX2 3 0.866025 0.500000 0.000000 0.00000
MTRIX3 3 0.000000 0.000000 1.000000 0.00000
MTRIX1 4 -0.500000 -0.866025 0.000000 0.00000
MTRIX2 4 0.866025 -0.500000 0.000000 0.00000
MTRIX3 4 0.000000 0.000000 1.000000 0.00000
MTRIX1 5 1.000000 0.000000 0.000000 0.00000
MTRIX2 5 0.000000 1.000000 0.000000 0.50000
MTRIX3 5 0.000000 0.000000 1.000000 0.00000
ATOM 1 N ILE A 40 1.000 1.000 1.000 1.00162.33 C
ATOM 2 CA LEU A 40 94.618 -5.253 91.582 1.00 87.10 C
ATOM 3 C ARG B 40 62.395 51.344 80.786 1.00107.25 C
HETATM 4 C1 EDO A 40 39.954 51.526 72.372 0.33 60.93 C
"""
pdb_str_4 = """\
REMARK 350 BIOMT1 1 1.000000 0.000000 0.000000 0.00000
REMARK 350 BIOMT2 1 0.000000 1.000000 0.000000 0.00000
REMARK 350 BIOMT3 1 0.000000 0.000000 1.000000 0.00000
REMARK 350 BIOMT1 2 0.309017 -0.951057 0.000000 0.00000
REMARK 350 BIOMT2 2 0.951057 0.309017 -0.000000 0.00000
REMARK 350 BIOMT3 2 0.000000 0.000000 1.000000 7.00000
REMARK 350 BIOMT1 3 -0.809017 -0.587785 0.000000 0.00000
REMARK 350 BIOMT2 3 0.587785 -0.809017 -0.000000 0.00000
REMARK 350 BIOMT3 3 0.000000 0.000000 1.000000 0.00000
CRYST1 1.000 1.000 1.000 90.00 90.00 90.00 P 1 1
ATOM 1 N ALA A 2 64.807-112.186 260.746 1.00160.99 N
ATOM 2 CA ALA A 2 64.727-111.450 262.002 1.00159.36 C
ATOM 3 C ALA A 2 63.960-110.148 261.805 1.00154.38 C
ATOM 4 O ALA A 2 62.935-109.914 262.452 1.00149.47 O
ATOM 5 CB ALA A 2 66.123-111.175 262.542 1.00156.98 C
ATOM 6 N SER A 3 64.474-109.323 260.896 1.00135.75 N
ATOM 7 CA SER A 3 63.887-108.040 260.510 1.00131.97 C
ATOM 8 C SER A 3 64.863-107.340 259.575 1.00140.51 C
ATOM 9 O SER A 3 65.864-107.925 259.165 1.00148.46 O
ATOM 10 CB SER A 3 63.641-107.147 261.726 1.00126.01 C
ATOM 11 OG SER A 3 64.002-105.804 261.453 1.00119.04 O
END
"""
pdb_str_5 = """\
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 0.309017 -0.951057 0.000000 0.00000
MTRIX2 2 0.951057 0.309017 -0.000000 0.00000
MTRIX3 2 0.000000 0.000000 1.000000 7.00000
MTRIX1 3 -0.809017 -0.587785 0.000000 0.00000
MTRIX2 3 0.587785 -0.809017 -0.000000 0.00000
MTRIX3 3 0.000000 0.000000 1.000000 0.00000
CRYST1 1.000 1.000 1.000 90.00 90.00 90.00 P 1 1
ATOM 757 N ASP A 247 16.068 -20.882 -28.984 1.00 35.93 N
ATOM 758 CA ASP A 247 15.914 -22.265 -28.600 1.00 47.90 C
ATOM 759 C ASP A 247 17.130 -23.042 -29.116 1.00 42.32 C
ATOM 760 O ASP A 247 17.461 -22.986 -30.301 1.00 47.25 O
ATOM 761 CB ASP A 247 14.621 -22.814 -29.198 1.00 47.22 C
ATOM 762 CG ASP A 247 14.068 -23.974 -28.412 1.00 61.15 C
ATOM 763 OD1 ASP A 247 14.359 -24.061 -27.196 1.00 63.66 O
ATOM 764 OD2 ASP A 247 13.341 -24.798 -29.012 1.00 77.01 O
ATOM 765 N VAL A 248 17.808 -23.746 -28.218 1.00 44.08 N
ATOM 766 CA VAL A 248 19.008 -24.503 -28.584 1.00 46.18 C
ATOM 767 C VAL A 248 18.668 -25.988 -28.583 1.00 53.97 C
ATOM 768 O VAL A 248 18.049 -26.478 -27.638 1.00 51.48 O
ATOM 769 CB VAL A 248 20.185 -24.226 -27.608 1.00 47.55 C
ATOM 770 CG1 VAL A 248 21.414 -25.015 -28.012 1.00 41.43 C
ATOM 771 CG2 VAL A 248 20.513 -22.743 -27.567 1.00 41.64 C
ATOM 772 N VAL A 249 19.057 -26.697 -29.641 1.00 54.29 N
ATOM 773 CA VAL A 249 18.662 -28.097 -29.810 1.00 60.17 C
ATOM 774 C VAL A 249 19.859 -29.041 -29.982 1.00 57.98 C
ATOM 775 O VAL A 249 20.731 -28.827 -30.828 1.00 58.31 O
ATOM 776 CB VAL A 249 17.671 -28.280 -30.997 1.00 60.85 C
ATOM 777 CG1 VAL A 249 16.500 -27.300 -30.884 1.00 48.00 C
ATOM 778 CG2 VAL A 249 18.386 -28.110 -32.337 1.00 59.99 C
TER
ATOM 780 N LYS B 151 4.045 -6.858 -32.823 1.00 45.22 N
ATOM 781 CA LYS B 151 4.686 -6.715 -34.123 1.00 50.40 C
ATOM 782 C LYS B 151 5.707 -5.554 -34.172 1.00 47.13 C
ATOM 783 O LYS B 151 6.820 -5.764 -34.625 1.00 52.91 O
ATOM 784 CB LYS B 151 3.657 -6.646 -35.268 1.00 40.73 C
ATOM 785 CG LYS B 151 4.264 -6.627 -36.661 1.00 55.98 C
ATOM 786 CD LYS B 151 3.272 -7.051 -37.745 1.00 72.14 C
ATOM 787 CE LYS B 151 2.529 -8.338 -37.375 1.00 75.11 C
ATOM 788 NZ LYS B 151 3.451 -9.400 -36.884 1.00 75.46 N
ATOM 789 N ARG B 152 5.369 -4.349 -33.709 1.00 42.01 N
ATOM 790 CA ARG B 152 6.399 -3.290 -33.702 1.00 40.51 C
ATOM 791 C ARG B 152 6.155 -2.002 -32.909 1.00 34.21 C
ATOM 792 O ARG B 152 5.015 -1.605 -32.636 1.00 33.77 O
ATOM 793 CB ARG B 152 6.845 -2.937 -35.130 1.00 40.62 C
ATOM 794 CG ARG B 152 5.842 -2.126 -35.925 1.00 45.94 C
ATOM 795 CD ARG B 152 6.341 -1.926 -37.341 1.00 42.75 C
ATOM 796 NE ARG B 152 7.478 -1.006 -37.404 1.00 45.27 N
ATOM 797 CZ ARG B 152 8.177 -0.763 -38.509 1.00 49.68 C
ATOM 798 NH1 ARG B 152 7.860 -1.382 -39.644 1.00 47.81 N
ATOM 799 NH2 ARG B 152 9.192 0.096 -38.482 1.00 48.06 N
END
"""
pdb_str_8 = """\
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 0.496590 -0.643597 0.582393 0.00000 1
MTRIX2 2 0.867925 0.376088 -0.324443 0.00000 1
MTRIX3 2 -0.010221 0.666588 0.745356 0.00000 1
MTRIX1 3 -0.317946 -0.173437 0.932111 0.00000 1
MTRIX2 3 0.760735 -0.633422 0.141629 0.00000 1
MTRIX3 3 0.565855 0.754120 0.333333 0.00000 1
ATOM 1 N THR A 1 9.670 10.289 11.135 1.00 20.00 N
ATOM 2 CA THR A 2 9.559 8.931 10.615 1.00 20.00 C
ATOM 3 C THR A 3 9.634 7.903 11.739 1.00 20.00 C
ATOM 4 O THR B 4 10.449 8.027 12.653 1.00 20.00 O
ATOM 5 CB THR B 5 10.660 8.630 9.582 1.00 20.00 C
ATOM 6 OG1 THR A 6 10.560 9.552 8.490 1.00 20.00 O
ATOM 7 CG2 THR A 7 10.523 7.209 9.055 1.00 20.00 C
END
"""
def exercise_03():
  """
  Verify that there are no errors processing the write command
  No inspection of the output is done. Just making sure it does not break
  """
  pdb_inp = pdb.input(source_info=None, lines=pdb_str_1)
  # MTRIX processing is exercised for side effects only; the returned
  # transform_info is deliberately unused.
  transform_info = pdb_inp.process_MTRIX_records()
  transforms_obj = iotbx.ncs.input(
    hierarchy=pdb_inp.construct_hierarchy())
  # Re-read the input; construct_hierarchy above may have consumed state —
  # TODO confirm whether this second read is actually required.
  pdb_inp = pdb.input(source_info=None, lines=pdb_str_1)
  transforms_obj.get_ncs_info_as_spec()
def exercise_04():
"""Test MTRIX record processing"""
expected = """\
ATOM 1 N ILE A 40 1.000 1.000 1.000 1.00162.33 C
ATOM 2 CA LEU A 40 94.618 -5.253 91.582 1.00 87.10 C
TER
ATOM 3 C ARG B 40 62.395 51.344 80.786 1.00107.25 C
TER
HETATM 4 C1 EDO A 40 39.954 51.526 72.372 0.33 60.93 C
ATOM 1 N ILE C 40 1.000 -1.000 1.000 1.00162.33 C
ATOM 2 CA LEU C 40 94.618 -91.582 -5.253 1.00 87.10 C
TER
ATOM 3 C ARG D 40 62.395 -80.786 51.344 1.00107.25 C
TER
HETATM 4 C1 EDO C 40 39.954 -72.372 51.526 0.33 60.93 C
ATOM 1 N ILE E 40 1.000 1.000 -1.000 1.00162.33 C
ATOM 2 CA LEU E 40 91.582 -5.253 -94.618 1.00 87.10 C
TER
ATOM 3 C ARG F 40 80.786 51.344 -62.395 1.00107.25 C
TER
HETATM 4 C1 EDO E 40 72.372 51.526 -39.954 0.33 60.93 C
ATOM 1 N ILE G 40 -1.000 1.000 1.000 1.00162.33 C
ATOM 2 CA LEU G 40 5.253 94.618 91.582 1.00 87.10 C
TER
ATOM 3 C ARG H 40 -51.344 62.395 80.786 1.00107.25 C
TER
HETATM 4 C1 EDO G 40 -51.526 39.954 72.372 0.33 60.93 C
ATOM 1 N ILE I 40 1.000 1.000 -1.000 1.00162.33 C
ATOM 2 CA LEU I 40 91.582 -5.253 -94.618 1.00 87.10 C
TER
ATOM 3 C ARG J 40 80.786 51.344 -62.395 1.00107.25 C
TER
HETATM 4 C1 EDO I 40 72.372 51.526 -39.954 0.33 60.93 C
ATOM 1 N ILE K 40 -1.000 1.000 -1.000 1.00162.33 C
ATOM 2 CA LEU K 40 5.253 91.582 -94.618 1.00 87.10 C
TER
ATOM 3 C ARG L 40 -51.344 80.786 -62.395 1.00107.25 C
TER
HETATM 4 C1 EDO K 40 -51.526 72.372 -39.954 0.33 60.93 C
ATOM 1 N ILE M 40 -0.366 1.366 1.000 1.00162.33 C
ATOM 2 CA LEU M 40 51.858 79.315 91.582 1.00 87.10 C
TER
ATOM 3 C ARG N 40 -13.268 79.708 80.786 1.00107.25 C
TER
HETATM 4 C1 EDO M 40 -24.646 60.364 72.372 0.33 60.93 C
ATOM 1 N ILE O 40 -1.366 0.366 1.000 1.00162.33 C
ATOM 2 CA LEU O 40 -42.760 84.568 91.582 1.00 87.10 C
TER
ATOM 3 C ARG P 40 -75.663 28.364 80.786 1.00107.25 C
TER
HETATM 4 C1 EDO O 40 -64.600 8.838 72.372 0.33 60.93 C
ATOM 1 N ILE Q 40 1.000 1.500 1.000 1.00162.33 C
ATOM 2 CA LEU Q 40 94.618 -4.753 91.582 1.00 87.10 C
TER
ATOM 3 C ARG R 40 62.395 51.844 80.786 1.00107.25 C
TER
HETATM 4 C1 EDO Q 40 39.954 52.026 72.372 0.33 60.93 C
ATOM 1 N ILE S 40 -1.366 0.366 0.000 1.00162.33 C
ATOM 2 CA LEU S 40 -42.760 84.568 90.582 1.00 87.10 C
TER
ATOM 3 C ARG T 40 -75.663 28.364 79.786 1.00107.25 C
TER
HETATM 4 C1 EDO S 40 -64.600 8.838 71.372 0.33 60.93 C
END
"""
pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str_3)
model = mmtbx.model.manager(pdb_inp, expand_with_mtrix=False)
model.expand_with_BIOMT_records()
assert not show_diff(expected, model.model_as_pdb())
def exercise_05():
"""Test MTRIX record processing"""
cau_expected_results = """\
ATOM 1 N ILE A 40 1.000 1.000 1.000 1.00162.33 C
ATOM 2 CA LEU A 40 94.618 -5.253 91.582 1.00 87.10 C
TER
ATOM 3 C ARG B 40 62.395 51.344 80.786 1.00107.25 C
TER
HETATM 4 C1 EDO A 40 39.954 51.526 72.372 0.33 60.93 C
ATOM 1 N ILE C 40 1.000 -1.000 1.000 1.00162.33 C
ATOM 2 CA LEU C 40 94.618 -91.582 -5.253 1.00 87.10 C
TER
ATOM 3 C ARG D 40 62.395 -80.786 51.344 1.00107.25 C
TER
HETATM 4 C1 EDO C 40 39.954 -72.372 51.526 0.33 60.93 C
ATOM 1 N ILE E 40 -0.366 1.366 1.000 1.00162.33 C
ATOM 2 CA LEU E 40 51.858 79.315 91.582 1.00 87.10 C
TER
ATOM 3 C ARG F 40 -13.268 79.708 80.786 1.00107.25 C
TER
HETATM 4 C1 EDO E 40 -24.646 60.364 72.372 0.33 60.93 C
ATOM 1 N ILE G 40 -1.366 0.366 1.000 1.00162.33 C
ATOM 2 CA LEU G 40 -42.760 84.568 91.582 1.00 87.10 C
TER
ATOM 3 C ARG H 40 -75.663 28.364 80.786 1.00107.25 C
TER
HETATM 4 C1 EDO G 40 -64.600 8.838 72.372 0.33 60.93 C
ATOM 1 N ILE I 40 1.000 1.500 1.000 1.00162.33 C
ATOM 2 CA LEU I 40 94.618 -4.753 91.582 1.00 87.10 C
TER
ATOM 3 C ARG J 40 62.395 51.844 80.786 1.00107.25 C
TER
HETATM 4 C1 EDO I 40 39.954 52.026 72.372 0.33 60.93 C
END
"""
# use MTRIX data
pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str_3)
model = mmtbx.model.manager(pdb_inp)
assert not show_diff(cau_expected_results, model.model_as_pdb())
def exercise_06():
  """ Test that when building bio-molecule and then finding NCS relations
  from it, we get the same rotation and translation"""
  # method 0 uses pdb_str_4 (BIOMT records), method 1 uses pdb_str_5 (MTRIX)
  pdb_strings = [pdb_str_4, pdb_str_5]
  for method,pdb_string in enumerate(pdb_strings):
    pdb_inp = pdb.input(source_info=None, lines=pdb_string)
    model = mmtbx.model.manager(pdb_inp, expand_with_mtrix=False)
    crystal_symmetry = model.crystal_symmetry()
    # The exact transforms from pdb_string
    r1_expected = matrix.sqr(
      [0.309017, -0.951057, 0.0,0.951057, 0.309017,-0.0,0.0,0.0,1.0])
    r2_expected = matrix.sqr(
      [-0.809017,-0.587785,0.0,0.587785,-0.809017,-0.0,0.0,0.0,1.0])
    t1_expected = matrix.col([0,0,7])
    t2_expected = matrix.col([0,0,0])
    # Look at biomt records retrieved from PDB file
    if method == 0:
      rec = model._model_input.process_BIOMT_records()
      model.expand_with_BIOMT_records()
      h = model.get_hierarchy()
    else:
      rec = model._model_input.process_MTRIX_records()
      model.expand_with_MTRIX_records()
      h = model.get_hierarchy()
    # Index 0 is the identity transform, so the non-trivial ones start at 1
    r1 = rec.r[1]
    r2 = rec.r[2]
    t1 = rec.t[1]
    t2 = rec.t[2]
    assert approx_equal(r1, r1_expected, eps=0.001)
    assert approx_equal(t1, t1_expected, eps=0.1)
    assert approx_equal(r2, r2_expected, eps=0.001)
    assert approx_equal(t2, t2_expected, eps=0.1)
    # Look at the rotation and translation found by the NCS search
    s = h.as_pdb_string(crystal_symmetry=crystal_symmetry)
    ncs_obj = ncs.input(hierarchy=pdb.input(
      source_info=None, lines=s).construct_hierarchy())
    nrgl = ncs_obj.get_ncs_restraints_group_list()
    assert approx_equal(r1_expected, nrgl[0].copies[0].r, eps=0.001)
    assert approx_equal(t1_expected, nrgl[0].copies[0].t, eps=0.1)
    assert approx_equal(r2_expected, nrgl[0].copies[1].r, eps=0.001)
    assert approx_equal(t2_expected, nrgl[0].copies[1].t, eps=0.1)
    # pdb_str_5 contains two chains, hence two NCS groups — TODO confirm
    if method == 0:
      assert nrgl.get_n_groups() == 1
    elif method == 1:
      assert nrgl.get_n_groups() == 2
def exercise_08():
  """
  Test for MTRIX record when copies already present in file
  """
  pdb_inp = pdb.input(source_info=None, lines=pdb_str_8)
  model = mmtbx.model.manager(pdb_inp)
  # pdb_str_8 has 7 atoms; MTRIX expansion must not duplicate them,
  # since the MTRIX records are flagged as already applied ("1" flag).
  assert model.get_number_of_atoms() == 7
if __name__ == '__main__':
  # Run every exercise in order; each asserts internally on failure.
  exercise_03()
  exercise_04()
  exercise_05()
  exercise_06()
  exercise_08()
| 53.449198 | 78 | 0.553077 |
ac239a329dd6b099be4ed3e1b0466e64189adccb | 11,307 | py | Python | core/providers/constants/test_contract.py | AsiganTheSunk/python3-gnosis-cli | c4c2638aa75b8a8268ad899d6cea1e602227ef19 | [
"MIT"
] | null | null | null | core/providers/constants/test_contract.py | AsiganTheSunk/python3-gnosis-cli | c4c2638aa75b8a8268ad899d6cea1e602227ef19 | [
"MIT"
] | null | null | null | core/providers/constants/test_contract.py | AsiganTheSunk/python3-gnosis-cli | c4c2638aa75b8a8268ad899d6cea1e602227ef19 | [
"MIT"
] | null | null | null | test_address_contract = "0xf79cb3BEA83BD502737586A6E8B133c378FD1fF2"
test_abi_contract = [{"name": "TokenPurchase", "inputs": [{"type": "address", "name": "buyer", "indexed": 'true'}, {"type": "uint256", "name": "eth_sold", "indexed": 'true'}, {"type": "uint256", "name": "tokens_bought", "indexed": 'true'}], "anonymous": 'false', "type": "event"}, {"name": "EthPurchase", "inputs": [{"type": "address", "name": "buyer", "indexed": 'true'}, {"type": "uint256", "name": "tokens_sold", "indexed": 'true'}, {"type": "uint256", "name": "eth_bought", "indexed": 'true'}], "anonymous": 'false', "type": "event"}, {"name": "AddLiquidity", "inputs": [{"type": "address", "name": "provider", "indexed": 'true'}, {"type": "uint256", "name": "eth_amount", "indexed": 'true'}, {"type": "uint256", "name": "token_amount", "indexed": 'true'}], "anonymous": 'false', "type": "event"}, {"name": "RemoveLiquidity", "inputs": [{"type": "address", "name": "provider", "indexed": 'true'}, {"type": "uint256", "name": "eth_amount", "indexed": 'true'}, {"type": "uint256", "name": "token_amount", "indexed": 'true'}], "anonymous": 'false', "type": "event"}, {"name": "Transfer", "inputs": [{"type": "address", "name": "_from", "indexed": 'true'}, {"type": "address", "name": "_to", "indexed": 'true'}, {"type": "uint256", "name": "_value", "indexed": 'false'}], "anonymous": 'false', "type": "event"}, {"name": "Approval", "inputs": [{"type": "address", "name": "_owner", "indexed": 'true'}, {"type": "address", "name": "_spender", "indexed": 'true'}, {"type": "uint256", "name": "_value", "indexed": 'false'}], "anonymous": 'false', "type": "event"}, {"name": "setup", "outputs": [], "inputs": [{"type": "address", "name": "token_addr"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 175875}, {"name": "addLiquidity", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "min_liquidity"}, {"type": "uint256", "name": "max_tokens"}, {"type": "uint256", "name": "deadline"}], "constant": 'false', "payable": 'true', "type": 
"function", "gas": 82616}, {"name": "removeLiquidity", "outputs": [{"type": "uint256", "name": "out"}, {"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "amount"}, {"type": "uint256", "name": "min_eth"}, {"type": "uint256", "name": "min_tokens"}, {"type": "uint256", "name": "deadline"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 116814}, {"name": "__default__", "outputs": [], "inputs": [], "constant": 'false', "payable": 'true', "type": "function"}, {"name": "ethToTokenSwapInput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "min_tokens"}, {"type": "uint256", "name": "deadline"}], "constant": 'false', "payable": 'true', "type": "function", "gas": 12757}, {"name": "ethToTokenTransferInput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "min_tokens"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "recipient"}], "constant": 'false', "payable": 'true', "type": "function", "gas": 12965}, {"name": "ethToTokenSwapOutput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_bought"}, {"type": "uint256", "name": "deadline"}], "constant": 'false', "payable": 'true', "type": "function", "gas": 50463}, {"name": "ethToTokenTransferOutput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_bought"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "recipient"}], "constant": 'false', "payable": 'true', "type": "function", "gas": 50671}, {"name": "tokenToEthSwapInput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_sold"}, {"type": "uint256", "name": "min_eth"}, {"type": "uint256", "name": "deadline"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 47503}, {"name": "tokenToEthTransferInput", "outputs": [{"type": "uint256", "name": "out"}], 
"inputs": [{"type": "uint256", "name": "tokens_sold"}, {"type": "uint256", "name": "min_eth"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "recipient"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 47712}, {"name": "tokenToEthSwapOutput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "eth_bought"}, {"type": "uint256", "name": "max_tokens"}, {"type": "uint256", "name": "deadline"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 50175}, {"name": "tokenToEthTransferOutput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "eth_bought"}, {"type": "uint256", "name": "max_tokens"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "recipient"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 50384}, {"name": "tokenToTokenSwapInput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_sold"}, {"type": "uint256", "name": "min_tokens_bought"}, {"type": "uint256", "name": "min_eth_bought"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "token_addr"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 51007}, {"name": "tokenToTokenTransferInput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_sold"}, {"type": "uint256", "name": "min_tokens_bought"}, {"type": "uint256", "name": "min_eth_bought"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "recipient"}, {"type": "address", "name": "token_addr"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 51098}, {"name": "tokenToTokenSwapOutput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_bought"}, {"type": "uint256", "name": "max_tokens_sold"}, {"type": "uint256", "name": "max_eth_sold"}, {"type": "uint256", "name": 
"deadline"}, {"type": "address", "name": "token_addr"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 54928}, {"name": "tokenToTokenTransferOutput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_bought"}, {"type": "uint256", "name": "max_tokens_sold"}, {"type": "uint256", "name": "max_eth_sold"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "recipient"}, {"type": "address", "name": "token_addr"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 55019}, {"name": "tokenToExchangeSwapInput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_sold"}, {"type": "uint256", "name": "min_tokens_bought"}, {"type": "uint256", "name": "min_eth_bought"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "exchange_addr"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 49342}, {"name": "tokenToExchangeTransferInput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_sold"}, {"type": "uint256", "name": "min_tokens_bought"}, {"type": "uint256", "name": "min_eth_bought"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "recipient"}, {"type": "address", "name": "exchange_addr"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 49532}, {"name": "tokenToExchangeSwapOutput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_bought"}, {"type": "uint256", "name": "max_tokens_sold"}, {"type": "uint256", "name": "max_eth_sold"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "exchange_addr"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 53233}, {"name": "tokenToExchangeTransferOutput", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_bought"}, {"type": "uint256", 
"name": "max_tokens_sold"}, {"type": "uint256", "name": "max_eth_sold"}, {"type": "uint256", "name": "deadline"}, {"type": "address", "name": "recipient"}, {"type": "address", "name": "exchange_addr"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 53423}, {"name": "getEthToTokenInputPrice", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "eth_sold"}], "constant": 'true', "payable": 'false', "type": "function", "gas": 5542}, {"name": "getEthToTokenOutputPrice", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_bought"}], "constant": 'true', "payable": 'false', "type": "function", "gas": 6872}, {"name": "getTokenToEthInputPrice", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "tokens_sold"}], "constant": 'true', "payable": 'false', "type": "function", "gas": 5637}, {"name": "getTokenToEthOutputPrice", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "uint256", "name": "eth_bought"}], "constant": 'true', "payable": 'false', "type": "function", "gas": 6897}, {"name": "tokenAddress", "outputs": [{"type": "address", "name": "out"}], "inputs": [], "constant": 'true', "payable": 'false', "type": "function", "gas": 1413}, {"name": "factoryAddress", "outputs": [{"type": "address", "name": "out"}], "inputs": [], "constant": 'true', "payable": 'false', "type": "function", "gas": 1443}, {"name": "balanceOf", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "address", "name": "_owner"}], "constant": 'true', "payable": 'false', "type": "function", "gas": 1645}, {"name": "transfer", "outputs": [{"type": "bool", "name": "out"}], "inputs": [{"type": "address", "name": "_to"}, {"type": "uint256", "name": "_value"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 75034}, {"name": "transferFrom", "outputs": [{"type": "bool", "name": "out"}], "inputs": [{"type": "address", 
"name": "_from"}, {"type": "address", "name": "_to"}, {"type": "uint256", "name": "_value"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 110907}, {"name": "approve", "outputs": [{"type": "bool", "name": "out"}], "inputs": [{"type": "address", "name": "_spender"}, {"type": "uint256", "name": "_value"}], "constant": 'false', "payable": 'false', "type": "function", "gas": 38769}, {"name": "allowance", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [{"type": "address", "name": "_owner"}, {"type": "address", "name": "_spender"}], "constant": 'true', "payable": 'false', "type": "function", "gas": 1925}, {"name": "name", "outputs": [{"type": "bytes32", "name": "out"}], "inputs": [], "constant": 'true', "payable": 'false', "type": "function", "gas": 1623}, {"name": "symbol", "outputs": [{"type": "bytes32", "name": "out"}], "inputs": [], "constant": 'true', "payable": 'false', "type": "function", "gas": 1653}, {"name": "decimals", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [], "constant": 'true', "payable": 'false', "type": "function", "gas": 1683}, {"name": "totalSupply", "outputs": [{"type": "uint256", "name": "out"}], "inputs": [], "constant": 'true', "payable": 'false', "type": "function", "gas": 1713}]
| 2,261.4 | 11,235 | 0.581144 |
a36d9f3851fe3e27724b7d2d0e8fb7aff6265ea8 | 3,471 | py | Python | server/ships.py | DrunyaGames/SeaBattle-Plus-Plus | a60250b1af17651c9a2af5dcaa39a11a3a09dfa5 | [
"MIT"
] | null | null | null | server/ships.py | DrunyaGames/SeaBattle-Plus-Plus | a60250b1af17651c9a2af5dcaa39a11a3a09dfa5 | [
"MIT"
] | null | null | null | server/ships.py | DrunyaGames/SeaBattle-Plus-Plus | a60250b1af17651c9a2af5dcaa39a11a3a09dfa5 | [
"MIT"
] | null | null | null | from errors import *
class BaseShip:
    """A conventional straight ship occupying ``len`` consecutive cells."""

    name = '%s deck'

    def __init__(self, ship_len, x, y, direction, field):
        self.len = ship_len
        self.field = field
        self.x = x
        self.y = y
        self.direction = direction
        # Class attribute is a template; the instance name fills in the length.
        self.name = self.name % self.len
        self.shoots = []
        self.shoots_count = 0
        self.is_dead = False

    @property
    def cells(self):
        """Field cells covered by the ship.

        Raises BadFieldCoords when the direction is unknown or the ship
        would extend past the low edge of the field.
        """
        col, row = self.x, self.y
        if not self.direction and row - self.len + 1 >= 0:
            # Direction 0: cells from (col, row - len + 1) up to (col, row).
            return [self.field[col, j + 1] for j in range(row - self.len, row)]
        if self.direction == 1:
            return [self.field[j, row] for j in range(col, col + self.len)]
        if self.direction == 2:
            return [self.field[col, j] for j in range(row, row + self.len)]
        if self.direction == 3 and col - self.len + 1 >= 0:
            return [self.field[j + 1, row] for j in range(col - self.len, col)]
        raise BadFieldCoords

    def place(self):
        """Register this ship on every cell it covers."""
        self.field.add_obj_to_cells(self.cells, self)

    def shoot(self, x, y):
        """Record a shot at (x, y); the ship dies once each deck was hit."""
        self.shoots_count += 1
        if (x, y) not in self.shoots:
            self.shoots.append((x, y))
        if len(self.shoots) >= self.len:
            self.is_dead = True
class SpecialShip:
    """Base class for the non-linear special objects (hospital, trawler).

    ``n`` is the number of unique hits needed to destroy the object;
    subclasses override it (it defaults to 0 here).
    """

    def __init__(self, x, y, direction, field):
        self.field = field
        self.x = x
        self.y = y
        self.direction = direction
        self.n = 0
        self.shoots = []
        self.shoots_count = 0
        self.is_dead = False

    def shoot(self, x, y):
        """Record a shot at (x, y); dead once ``n`` distinct cells are hit."""
        self.shoots_count += 1
        if (x, y) not in self.shoots:
            self.shoots.append((x, y))
        if len(self.shoots) >= self.n:
            self.is_dead = True
class Hospital(SpecialShip):
    """Cross-shaped special object occupying 5 cells centred on (x, y).

    NOTE(review): is_dead is forced to True immediately after
    construction — presumably so a hospital never keeps the game alive;
    confirm against the end-of-game check.
    """

    name = 'hospital'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_dead = True

    @property
    def cells(self):
        # Cross shape: the centre plus its four direct neighbours.
        # Only the low-side bounds are checked here; high-side bounds are
        # left to the field's own indexing.
        x, y = self.x, self.y
        if y - 1 < 0 or x - 1 < 0:
            raise BadFieldCoords
        cells = [
            self.field[x, y],
            self.field[x, y + 1],
            self.field[x, y - 1],
            self.field[x + 1, y],
            self.field[x - 1, y]
        ]
        return cells

    def place(self):
        # Register the hospital on all of its cells.
        self.field.add_obj_to_cells(self.cells, self)

    def shoot(self, x, y):
        # Besides recording the hit, a shot at a hospital increments the
        # field owner's missed_turns counter — NOTE(review): confirm whose
        # turn is actually skipped.
        super().shoot(x, y)
        self.field.player.missed_turns += 1
class TShip(SpecialShip):
    """T-shaped 'trawler': a 3-cell bar plus one stem cell; 4 hits sink it."""

    name = 'trawler'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The trawler covers 4 cells, so 4 unique hits destroy it.
        self.n = 4

    @property
    def cells(self):
        x, y = self.x, self.y
        if self.direction in [0, 2]:
            # Horizontal bar (x-1..x+1 at row y); stem at y-1 for
            # direction 0, at y+1 for direction 2.
            # NOTE(review): `and` binds tighter than `or`, so this reads
            # x-1 < 0 or (y-1 < 0 and direction == 0); the y bound is only
            # enforced for direction 0, matching the y-1 cell being used
            # only in that case.
            if x - 1 < 0 or y - 1 < 0 and not self.direction:
                raise BadFieldCoords
            cells = [
                self.field[x, y],
                self.field[x + 1, y],
                self.field[x - 1, y],
                self.field[x, y - 1] if not self.direction else self.field[x, y + 1]
            ]
        else:
            # Vertical bar (y-1..y+1 at column x); stem at x+1 for
            # direction 1, at x-1 for direction 3 (x bound checked only
            # for direction 3).
            if y - 1 < 0 or self.direction == 3 and x - 1 < 0:
                raise BadFieldCoords
            cells = [
                self.field[x, y],
                self.field[x, y + 1],
                self.field[x, y - 1],
                self.field[x + 1, y] if self.direction == 1 else self.field[x - 1, y]
            ]
        return cells

    def place(self):
        # Register the trawler on all of its cells.
        self.field.add_obj_to_cells(self.cells, self)
| 26.496183 | 85 | 0.496399 |
01ae3b72dcf72283991f83b880e67e22b602807b | 4,780 | py | Python | test/functional/interface_http.py | Kopernikus-dev/step4.3 | 9051be4dfccdc64f534e950e81caae4bd740b275 | [
"MIT"
] | null | null | null | test/functional/interface_http.py | Kopernikus-dev/step4.3 | 9051be4dfccdc64f534e950e81caae4bd740b275 | [
"MIT"
] | null | null | null | test/functional/interface_http.py | Kopernikus-dev/step4.3 | 9051be4dfccdc64f534e950e81caae4bd740b275 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (PivxTestFramework):
    """Exercise the RPC HTTP server's keep-alive behaviour and request limits."""

    def set_test_params(self):
        # Three nodes: default settings, keep-alive disabled, keep-alive default.
        self.num_nodes = 3

    def setup_network(self):
        self.setup_nodes()

    def run_test(self):
        """Check HTTP/1.1 persistent connections and oversized-request handling."""

        #################################################
        # lowlevel check for http persistent connection #
        #################################################
        url = urllib.parse.urlparse(self.nodes[0].url)
        authpair = url.username + ':' + url.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}

        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!

        #send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1) #must also respond with a correct json-rpc message
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        conn.close()

        #same should happen if we add keep-alive because this should be the std. behaviour
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}

        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!

        #send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1) #must also respond with a correct json-rpc message
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        conn.close()

        #now do the same with "Connection: close"
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}

        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock==None) #now the connection must be closed after the response

        #node1 (2nd node) is running with disabled keep-alive option
        urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
        authpair = urlNode1.username + ':' + urlNode1.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}

        conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)

        #node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
        urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
        authpair = urlNode2.username + ':' + urlNode2.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}

        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open: the daemon uses keep-alive by default

        # Check excessive request size
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*1000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.NOT_FOUND)

        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*10000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
    # Run the test directly when invoked as a script.
    HTTPBasicsTest().main()
| 43.853211 | 109 | 0.632845 |
a8b29e5014eb4568e416846dd8e52f7b2f7f54d9 | 6,869 | py | Python | services/pipeline/bin/historical/migrations/populate_local_dt.py | e-mission/e-mission-ng-aggregator | 0ce43b93192459ac1864b8e88e96b83ea0929aa2 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | services/pipeline/bin/historical/migrations/populate_local_dt.py | e-mission/e-mission-ng-aggregator | 0ce43b93192459ac1864b8e88e96b83ea0929aa2 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 21 | 2018-12-19T07:09:45.000Z | 2021-12-13T20:07:36.000Z | services/pipeline/bin/historical/migrations/populate_local_dt.py | e-mission/e-mission-ng-aggregator | 0ce43b93192459ac1864b8e88e96b83ea0929aa2 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 2 | 2019-05-02T16:20:14.000Z | 2019-05-02T17:33:42.000Z | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Note that this script is only retained for historical purposes,
# to document how we expanded the local date entries. It will not run
# any more, since we have removed the trip, place, section and stop
# collections and merged them into the analysis database
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
# logging.basicConfig(level=logging.DEBUG)
import arrow
import argparse
import json
import emission.core.get_database as edb
import emission.core.wrapper.localdate as ecwld
# For entries in the timeseries, this is simple because all of them follow the "ts" -> "local_dt" -> "fmt_time" pattern.
# We can just parse the fmt_time to get an arrow object and then get all the components
# For trips, sections, places and stops, we still have the fmt time, we just need to parse individual fields properly
# In order to allow us to run multiple processes in parallel, this takes the
# key of the stream as the input. Then we can run multiple processes, one for
# each stream, in parallel
def get_local_date(fmt_time, timezone):
    """Build a LocalDate from an ISO-formatted time string.

    Parsing fmt_time only yields a UTC offset, not a timezone *name*,
    and the name is more portable — so the caller must look the name up
    elsewhere and pass it in via ``timezone``.
    """
    parsed = arrow.get(fmt_time)
    logging.debug("after parsing, adt = %s" % parsed)
    return ecwld.LocalDate.get_local_date(parsed.timestamp, timezone)
def fix_timeseries(key):
    """Recompute local_dt fields for every timeseries entry of one stream.

    Taking the stream ``key`` as the unit of work lets several streams
    be fixed by parallel processes (see the module comment).
    """
    tsdb = edb.get_timeseries_db()
    tsdb_cursor = tsdb.find({'metadata.key': key})
    logging.debug("Fixing %s entries for key %s" % (tsdb_cursor.count(), key))
    # Tracks whether the previous entry carried a data.local_dt so the
    # override / not-override message is logged once per transition
    # rather than once per entry.
    data_local_dt = False
    for entry in tsdb.find({'metadata.key': key}):
        entry["metadata"]["write_local_dt"] = get_local_date(entry['metadata']['write_fmt_time'],
                                                             entry['metadata']['time_zone'])
        if 'local_dt' in entry['data']:
            if data_local_dt == False:
                logging.info("overriding local_dt for key %s" % key)
                data_local_dt = True
            entry['data']['local_dt'] = get_local_date(entry['data']['fmt_time'],
                                                       entry['metadata']['time_zone'])
        else:
            if data_local_dt == True:
                logging.info("not overriding local_dt for key %s" % key)
                data_local_dt = False
        tsdb.save(entry)
def fix_file(filename):
    """Recompute local_dt fields in a timeline saved as a JSON file.

    Same conversion as fix_timeseries, but operating on an exported
    JSON dump; the file is rewritten in place.
    """
    # Use context managers so both file handles are closed deterministically
    # (the original left them to the garbage collector).
    with open(filename) as in_fd:
        timeseries = json.load(in_fd)
    logging.debug("Fixing %s entries for filename %s" % (len(timeseries), filename))
    # Log the override / not-override message once per transition, not per entry.
    data_local_dt = False
    for entry in timeseries:
        entry["metadata"]["write_local_dt"] = get_local_date(entry['metadata']['write_fmt_time'],
                                                             entry['metadata']['time_zone'])
        if 'local_dt' in entry['data']:
            if not data_local_dt:
                logging.info("overriding local_dt for file %s" % filename)
                data_local_dt = True
            entry['data']['local_dt'] = get_local_date(entry['data']['fmt_time'],
                                                       entry['metadata']['time_zone'])
        else:
            if data_local_dt:
                logging.info("not overriding local_dt for file %s" % filename)
                data_local_dt = False
    logging.debug("Finished converting %s entries" % len(timeseries))
    with open(filename, "w") as out_fd:
        json.dump(timeseries, out_fd, indent=4)
def fix_trips_or_sections(collection):
    """Populate start/end local_dt fields for every trip or section.

    The timezone comes from the location entry recorded at the same
    timestamp; if none is found, a default timezone is used and a
    warning is logged.
    """
    tsdb = edb.get_timeseries_db()
    for entry in collection.find():
        start_loc_entry = tsdb.find_one({'user_id': entry['user_id'],
            'metadata.key': 'background/location', 'data.ts': entry['start_ts']})
        end_loc_entry = tsdb.find_one({'user_id': entry['user_id'],
            'metadata.key': 'background/location', 'data.ts': entry['end_ts']})

        if start_loc_entry is not None:
            start_tz = start_loc_entry['metadata']['time_zone']
        else:
            logging.warn("No start_loc_entry found for trip %s, returning default" % entry)
            start_tz = "America/Los_Angeles"

        if end_loc_entry is not None:
            end_tz = end_loc_entry['metadata']['time_zone']
        else:
            logging.warn("No end_loc_entry found for trip %s, returning default" % entry)
            end_tz = "America/Los_Angeles"

        logging.debug("Found entries with metadata = %s, %s" % (start_tz, end_tz))

        entry['start_local_dt'] = get_local_date(entry['start_fmt_time'], start_tz)
        entry['end_local_dt'] = get_local_date(entry['end_fmt_time'], end_tz)
        collection.save(entry)
def fix_stops_or_places(collection):
    """Populate enter/exit local_dt fields for every stop or place.

    Unlike trips/sections, either timestamp may be missing (e.g. the
    first place has no enter, the last has no exit), so each side is
    handled independently.
    """
    tsdb = edb.get_timeseries_db()
    for entry in collection.find():
        if 'enter_ts' in entry:
            enter_loc_entry = tsdb.find_one({'user_id': entry['user_id'],
                'metadata.key': 'background/location', 'data.ts': entry['enter_ts']})
            if enter_loc_entry is not None:
                enter_tz = enter_loc_entry['metadata']['time_zone']
            else:
                # No matching location entry: fall back to the default timezone.
                enter_tz = "America/Los_Angeles"
            logging.debug("entry metadata timezone = %s" % enter_tz)
            entry['enter_local_dt'] = get_local_date(entry['enter_fmt_time'],
                                                     enter_tz)
        else:
            logging.warning("No entry timestamp found, skipping")

        if 'exit_ts' in entry:
            exit_loc_entry = tsdb.find_one({'user_id': entry['user_id'],
                'metadata.key': 'background/location', 'data.ts': entry['exit_ts']})
            if exit_loc_entry is not None:
                exit_tz = exit_loc_entry['metadata']['time_zone']
            else:
                exit_tz = "America/Los_Angeles"
            logging.debug("exit metadata timezone = %s" % exit_tz)
            entry['exit_local_dt'] = get_local_date(entry['exit_fmt_time'], exit_tz)
        else:
            logging.warning("No exit timestamp found, skipping")
        collection.save(entry)
# Entry point: dispatch on the requested stream/collection.
# NOTE: per the header comment, the trip/place/section/stop collections
# were later removed, so these branches are retained for historical
# documentation only.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("key",
        help="the key representing the stream that we want to fix")
    parser.add_argument("-f", "--filename",
        help="a saved timeline whose local_dt needs to be fixed. If this is specified, key is ignored")
    args = parser.parse_args()

    if args.filename is not None:
        fix_file(args.filename)
    elif args.key == "trips":
        fix_trips_or_sections(edb.get_trip_new_db())
    elif args.key == "sections":
        fix_trips_or_sections(edb.get_section_new_db())
    elif args.key == "places":
        fix_stops_or_places(edb.get_place_db())
    elif args.key == "stops":
        fix_stops_or_places(edb.get_stop_db())
    else:
        fix_timeseries(args.key)
| 43.201258 | 120 | 0.649294 |
6c1e068efe1e236cb05442005c0048d66f5f1a96 | 6,408 | py | Python | mkt/feed/fakedata.py | clouserw/zamboni | c4a568b69c1613f27da41d46328b2975cbdc1c07 | [
"BSD-3-Clause"
] | null | null | null | mkt/feed/fakedata.py | clouserw/zamboni | c4a568b69c1613f27da41d46328b2975cbdc1c07 | [
"BSD-3-Clause"
] | null | null | null | mkt/feed/fakedata.py | clouserw/zamboni | c4a568b69c1613f27da41d46328b2975cbdc1c07 | [
"BSD-3-Clause"
] | null | null | null | import hashlib
import random
from django.core.files.storage import default_storage as storage
from mpconstants.collection_colors import COLLECTION_COLORS
import pydenticon
from mkt.constants.regions import REGIONS_DICT
from mkt.constants.carriers import CARRIER_CHOICE_DICT
from mkt.webapps.fakedata import foreground, generate_apps
from mkt.feed.models import (FeedApp, FeedBrand,
FeedBrandMembership,
FeedCollection, FeedCollectionMembership,
FeedShelf, FeedShelfMembership, FeedItem)
# Word pool for generated placeholder text.
dummy_text = 'foo bar baz blee zip zap cvan fizz buzz something'.split()


def rand_text(n=10):
    """Generate a random string of *n* words drawn from dummy_text."""
    # range() instead of the Python-2-only xrange(): iteration behaviour is
    # identical on both versions; unused loop variable renamed to _.
    return ' '.join(random.choice(dummy_text) for _ in range(n))
def shelf(apps, **kw):
    """Create a fake operator shelf holding *apps*, plus its FeedItem.

    Keyword overrides: carrier, region, description, name, slug.
    Generates identicon images for the shelf and its landing page.
    """
    # NOTE(review): random.choice over dict .values() implies Python 2
    # (list) semantics — confirm before running under Python 3.
    carrier = kw.get('carrier', random.choice(CARRIER_CHOICE_DICT.values()))
    region = REGIONS_DICT[kw.get('region', 'restofworld')].id
    sh = FeedShelf.objects.create(
        carrier=carrier.id,
        description=kw.get('description', 'shelf for ' + carrier.name),
        name=kw.get('name', '%s Op Shelf' % carrier.name),
        region=region)

    # Deterministic identicon derived from the shelf name.
    gen = pydenticon.Generator(8, 8, foreground=foreground)
    img = gen.generate(unicode(sh.name).encode('utf8'), 128, 128,
                       output_format='png')
    with storage.open(sh.image_path(''), 'wb') as f:
        f.write(img)
    with storage.open(sh.image_path('_landing'), 'wb') as f:
        f.write(img)
    # Short content hash used for image cache busting.
    image_hash = hashlib.md5(img).hexdigest()[:8]
    sh.update(slug=kw.get('slug', 'shelf-%d' % sh.pk),
              image_hash=image_hash,
              image_landing_hash=image_hash)

    for a in apps:
        FeedShelfMembership.objects.create(obj=sh, app=a)
    FeedItem.objects.create(item_type='shelf', shelf=sh, region=region)
    return sh
def brand(apps, type, **kw):
    """Create a fake editorial brand of the given *type* holding *apps*.

    Keyword overrides: region, layout, slug. The slug defaults to
    'brand-<pk>' and is set after creation because the pk is needed.
    """
    region = REGIONS_DICT[kw.get('region', 'restofworld')].id
    br = FeedBrand.objects.create(
        layout=kw.get('layout', random.choice(['list', 'grid'])),
        slug='brand-',
        type=type)
    br.update(slug=kw.get('slug', 'brand-%d' % br.pk))

    for a in apps:
        FeedBrandMembership.objects.create(obj=br, app=a)
    FeedItem.objects.create(item_type='brand', brand=br, region=region)
    return br
def collection(apps, slug, background_image=True, **kw):
    """Create a fake app collection holding *apps*, plus its FeedItem.

    Keyword overrides: region, color, type, description, name. When
    background_image is True an identicon background is generated and
    hashed; otherwise image_hash stays None.
    """
    region = REGIONS_DICT[kw.get('region', 'restofworld')].id
    colorname = kw.get('color', random.choice(COLLECTION_COLORS.keys()))
    co = FeedCollection.objects.create(
        type=kw.get('type', 'listing'),
        color=colorname,
        background_color=COLLECTION_COLORS[colorname],
        slug=slug,
        description=kw.get('description', ''))
    name = kw.get('name', 'Collection %s' % co.pk)
    if background_image:
        gen = pydenticon.Generator(8, 8, foreground=foreground)
        img = gen.generate(name, 128, 128,
                           output_format='png')
        with storage.open(co.image_path(''), 'wb') as f:
            f.write(img)
        image_hash = hashlib.md5(img).hexdigest()[:8]
    else:
        image_hash = None
    co.name = name
    co.image_hash = image_hash
    co.save()

    for a in apps:
        FeedCollectionMembership.objects.create(obj=co, app=a)
    FeedItem.objects.create(item_type='collection', collection=co,
                            region=region)
    return co
def app_item(a, type, **kw):
    """Create a fake featured-app entry of the given *type* for app *a*.

    Keyword overrides: region, color, description, preview, the three
    pullquote fields, and slug. Generates an identicon background from
    the app slug.
    """
    region = REGIONS_DICT[kw.get('region', 'restofworld')].id
    colorname = kw.get('color', random.choice(COLLECTION_COLORS.keys()))
    gen = pydenticon.Generator(8, 8, foreground=foreground)
    img = gen.generate(a.app_slug, 128, 128,
                       output_format='png')
    ap = FeedApp.objects.create(
        app=a,
        description=kw.get('description', rand_text(12)),
        type=type,
        color=colorname,
        preview=kw.get('preview', None),
        pullquote_attribution=kw.get('pullquote_attribution', None),
        pullquote_rating=kw.get('pullquote_rating', None),
        pullquote_text=kw.get('pullquote_text', None),
        background_color=COLLECTION_COLORS[colorname],
        slug=kw.get('slug', 'feed-app-%d' % a.pk))
    with storage.open(ap.image_path(''), 'wb') as f:
        f.write(img)
    # Short content hash used for image cache busting.
    image_hash = hashlib.md5(img).hexdigest()[:8]
    ap.update(image_hash=image_hash)
    FeedItem.objects.create(item_type='app', app=ap, region=region)
    return ap
def generate_feed_data():
    """Generate 24 fake apps and one of every feed element variant.

    Covers: op shelves (with/without description), brands (grid/list),
    a grouped collection, promo/listing collections in all
    background/description combinations, and each featured-app type.
    """
    apps = generate_apps(24)
    # Four disjoint groups of six apps, one per element family.
    apps1, apps2, apps3, apps4 = apps[:6], apps[6:12], apps[12:18], apps[18:]
    shelf(apps1, slug='shelf', name='Shelf', description='')
    shelf(apps2, slug='shelf-desc', name='Shelf Description',
          description=rand_text())
    brand(apps1, 'hidden-gem', slug='brand-grid', layout='grid')
    brand(apps2, 'travel', slug='brand-list', layout='list')
    # Grouped collection: apps are added in pairs, two per group.
    co = collection([], slug='grouped')
    co.add_app_grouped(apps1[0].pk, 'group 1')
    co.add_app_grouped(apps1[1].pk, 'group 1')
    co.add_app_grouped(apps1[2].pk, 'group 2')
    co.add_app_grouped(apps1[3].pk, 'group 2')
    co.add_app_grouped(apps1[4].pk, 'group 3')
    co.add_app_grouped(apps1[5].pk, 'group 3')
    collection(apps2, slug='coll-promo', type='promo', name='Coll Promo')
    collection(apps2, slug='coll-promo-desc', type='promo',
               name='Coll Promo Desc',
               description=rand_text(),
               background_image=False)
    collection(apps2, slug='coll-promo-bg', type='promo',
               description='', name='Coll Promo Background')
    collection(apps2, slug='coll-promo-bg-desc', type='promo',
               name='Coll Promo Background Desc',
               description=rand_text(),
               background_image=False)
    collection(apps3, slug='coll-listing', type='listing',
               name='Coll Listing')
    collection(apps3, slug='coll-listing-desc', type='listing',
               name='Coll Listing Desc',
               description=rand_text())
    # One featured-app entry per supported type.
    app_item(apps4[0], type='icon', slug='feedapp-icon')
    app_item(apps4[1], type='image', slug='feedapp-image')
    app_item(apps4[2], type='description', slug='feedapp-description')
    app_item(apps4[3], type='quote', slug='feedapp-quote',
             pullquote_text='"%s"' % rand_text(12),
             pullquote_rating=4,
             pullquote_attribution="matt basta")
    app_item(apps4[4], type='preview', slug='feedapp-preview')
f1c5a09904ad4010077dc0ac9f0794b0ad98f9cc | 13,954 | py | Python | pywren_ibm_cloud/tests.py | erezh16/pywren-ibm-cloud | 54d0d5346f15ae86ff95b5502da2fc062014adb3 | [
"Apache-2.0"
] | null | null | null | pywren_ibm_cloud/tests.py | erezh16/pywren-ibm-cloud | 54d0d5346f15ae86ff95b5502da2fc062014adb3 | [
"Apache-2.0"
] | null | null | null | pywren_ibm_cloud/tests.py | erezh16/pywren-ibm-cloud | 54d0d5346f15ae86ff95b5502da2fc062014adb3 | [
"Apache-2.0"
] | null | null | null | #
# (C) Copyright IBM Corp. 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import logging
import sys
import unittest
import urllib.request
from collections import Counter
from multiprocessing.pool import ThreadPool

import pywren_ibm_cloud as pywren
from pywren_ibm_cloud.storage import InternalStorage
from pywren_ibm_cloud.config import default_config, extract_storage_config

# logging.basicConfig(level=logging.DEBUG)
# CLI arguments — NOTE(review): parsed at import time, so importing this
# module from another program inherits that program's argv.
parser = argparse.ArgumentParser(description="test all PyWren's functionality", usage='python -m pywren_ibm_cloud.tests [-c CONFIG] [-f TESTNAME]')
parser.add_argument('-c', '--config', type=argparse.FileType('r'), metavar='', default=None, help="use json config file")
parser.add_argument('-t', '--test', metavar='', default='all', help='run a specific test, type "-t help" for tests list')
args = parser.parse_args()

# Module-level fixtures, resolved once at import time.
CONFIG = default_config()
STORAGE_CONFIG = extract_storage_config(CONFIG)
STORAGE = InternalStorage(STORAGE_CONFIG).storage_handler

# All test objects live under this key prefix in the bucket.
PREFIX = '__pywren.test'
# Public word-count datasets used as map/reduce input.
TEST_FILES_URLS = ["http://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.enron.txt",
                   "http://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.kos.txt",
                   "http://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.nips.txt",
                   "http://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.nytimes.txt",
                   "http://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/vocab.pubmed.txt"]
def initTests():
    """Upload all test files to the bucket and store the expected total
    word count (plus one for the result object itself) under
    <PREFIX>/result for later comparison."""
    print('Uploading test files...')

    def up(param):
        # param is an (index, url) pair produced by enumerate()
        i, url = param
        content = urllib.request.urlopen(url).read()
        STORAGE.put_object(bucket_name=STORAGE_CONFIG['bucket'],
                           key='{}/test{}'.format(PREFIX, str(i)),
                           data=content)
        return len(content.split())

    # Upload in parallel; each worker returns its file's word count.
    pool = ThreadPool(128)
    results = pool.map(up, enumerate(TEST_FILES_URLS))
    pool.close()
    pool.join()

    result_to_compare = 1 + sum(results)  # including result's word
    STORAGE.put_object(bucket_name=STORAGE_CONFIG['bucket'],
                       key='{}/result'.format(PREFIX),
                       data=str(result_to_compare).encode())
def list_test_keys():
    """Return all storage keys under the test prefix."""
    return STORAGE.list_keys(bucket_name=STORAGE_CONFIG['bucket'], prefix=PREFIX)
def cleanTests():
    """Delete every object created under the test prefix."""
    print('Deleting test files...')
    for key in list_test_keys():
        STORAGE.delete_object(bucket_name=STORAGE_CONFIG['bucket'],
                              key=key)
def hello_world(param):
    """Trivial map function used to smoke-test invocation; ignores its input."""
    greeting = "Hello World!"
    return greeting
def simple_map_function(x, y):
    """Add the two map arguments together (trivial test map function)."""
    total = x + y
    return total
def simple_reduce_function(results):
    """Reduce function that adds up all of the map results.

    Uses the builtin sum(), which — like the original accumulation
    loop — starts from 0, so an empty result list reduces to 0.
    """
    return sum(results)
def pywren_inside_pywren_map_function1(x):
    """Map function that spawns a nested PyWren map over range(x),
    using the module-level CONFIG, and returns the nested results."""
    def _func(x):
        return x

    pw = pywren.function_executor(config=CONFIG)
    pw.map(_func, range(x))
    return pw.get_result()
def pywren_inside_pywren_map_function2(x):
    """Map function that spawns a nested PyWren call_async with x and
    returns its result."""
    def _func(x):
        return x

    pw = pywren.function_executor(config=CONFIG)
    pw.call_async(_func, x)
    return pw.get_result()
def pywren_inside_pywren_map_function3(x):
    """Map function that runs two nested PyWren maps on one executor
    and returns both result lists, exercising explicit future passing."""
    def _func(x):
        return x

    pw = pywren.function_executor(config=CONFIG)
    fut1 = pw.map(_func, range(x))
    fut2 = pw.map(_func, range(x))
    return [pw.get_result(fut1), pw.get_result(fut2)]
def my_map_function_obj(obj):
    """Word-count map function for a storage-object partition.

    :param obj: partition wrapper exposing .bucket, .key and a readable
                .data_stream holding the object's bytes
    :return: dict mapping each word to its number of occurrences
    """
    print('I am processing the object /{}/{}'.format(obj.bucket, obj.key))
    data = obj.data_stream.read()
    # str.split() with no separator splits on any whitespace, including
    # newlines, so this matches the original per-line word loop.
    return dict(Counter(data.decode('utf-8').split()))
def my_map_function_url(url):
    """Count word occurrences in one URL partition.

    Args:
        url: partition descriptor exposing ``path`` and a ``data_stream``
            file-like object yielding the raw bytes.

    Returns:
        dict (a collections.Counter) mapping each word to its count.
    """
    # Local import keeps the function self-contained for serialization.
    from collections import Counter
    print('I am processing the object from {}'.format(url.path))
    counter = Counter()
    data = url.data_stream.read()
    for line in data.splitlines():
        # Counter.update replaces the hand-rolled if/else counting loop.
        counter.update(line.decode('utf-8').split())
    return counter
def my_map_function_ibm_cos(key_i, bucket_name, ibm_cos):
    """Count word occurrences in one COS object fetched via the boto-style client.

    Args:
        key_i: object key to read.
        bucket_name: bucket containing the object.
        ibm_cos: client exposing ``get_object(Bucket=..., Key=...)`` that
            returns a dict with a readable 'Body'.

    Returns:
        dict (a collections.Counter) mapping each word to its count.
    """
    # Local import keeps the function self-contained for serialization.
    from collections import Counter
    print('I am processing the object /{}/{}'.format(bucket_name, key_i))
    counter = Counter()
    data = ibm_cos.get_object(Bucket=bucket_name, Key=key_i)['Body'].read()
    for line in data.splitlines():
        # Counter.update replaces the hand-rolled if/else counting loop.
        counter.update(line.decode('utf-8').split())
    return counter
def my_reduce_function(results):
    """Aggregate per-partition word counters into one total word count.

    Args:
        results: iterable of dicts mapping word -> occurrence count.

    Returns:
        int: sum of all counts across all partitions (0 if empty).
    """
    # Sum each counter's values, then sum across partitions — replaces
    # the manual nested accumulation loops.
    return sum(sum(count.values()) for count in results)
def my_cloudobject_put(obj, internal_storage):
    # Count words in the partition, stash the counter dict as a CloudObject,
    # and return only the lightweight reference instead of the full dict.
    counter = my_map_function_obj(obj)
    cloudobject = internal_storage.put_object(counter)
    return cloudobject
def my_cloudobject_get(results, internal_storage):
    # Dereference each CloudObject back into its counter dict, then reduce
    # to the overall word count.
    data = [internal_storage.get_object(cloudobject) for cloudobject in results]
    return my_reduce_function(data)
class TestPywren(unittest.TestCase):
    """End-to-end tests for the pywren executor against live cloud storage.

    The word-count style tests compare their result with the expected total
    that initTests() stored under '<PREFIX>/result'.
    """
    def checkResult(self, result):
        # The expected total was uploaded by initTests(); a list result is
        # summed (one partial count per map task) before comparing.
        result_to_compare = STORAGE.get_object(bucket_name=STORAGE_CONFIG['bucket'],
                                               key=f'{PREFIX}/result')
        if isinstance(result, list):
            total = 0
            for r in result:
                total += r
        else:
            total = result
        self.assertEqual(total, int(result_to_compare))
    def test_call_async(self):
        # call_async with no args, positional args (list) and keyword args (dict).
        print('Testing call_async()...')
        pw = pywren.function_executor(config=CONFIG)
        pw.call_async(hello_world, "")
        result = pw.get_result()
        self.assertEqual(result, "Hello World!")
        pw = pywren.function_executor(config=CONFIG)
        pw.call_async(simple_map_function, [4, 6])
        result = pw.get_result()
        self.assertEqual(result, 10)
        pw = pywren.function_executor(config=CONFIG)
        pw.call_async(simple_map_function, {'x': 2, 'y': 8})
        result = pw.get_result()
        self.assertEqual(result, 10)
    def test_map(self):
        print('Testing map()...')
        iterdata = [[1, 1], [2, 2], [3, 3], [4, 4]]
        pw = pywren.function_executor(config=CONFIG)
        pw.map(simple_map_function, iterdata)
        result = pw.get_result()
        self.assertEqual(result, [2, 4, 6, 8])
    def test_map_reduce(self):
        print('Testing map_reduce()...')
        iterdata = [[1, 1], [2, 2], [3, 3], [4, 4]]
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(simple_map_function, iterdata, simple_reduce_function)
        result = pw.get_result()
        self.assertEqual(result, 20)
    def test_multiple_executions(self):
        # One executor reused across several map() calls: get_result() with no
        # args collects all pending futures; with fs= it collects only those.
        print('Testing multiple executions...')
        pw = pywren.function_executor(config=CONFIG)
        iterdata = [[1, 1], [2, 2]]
        pw.map(simple_map_function, iterdata)
        iterdata = [[3, 3], [4, 4]]
        pw.map(simple_map_function, iterdata)
        result = pw.get_result()
        self.assertEqual(result, [2, 4, 6, 8])
        iterdata = [[1, 1], [2, 2]]
        pw.map(simple_map_function, iterdata)
        result = pw.get_result()
        self.assertEqual(result, [2, 4])
        iterdata = [[1, 1], [2, 2]]
        futures1 = pw.map(simple_map_function, iterdata)
        result1 = pw.get_result(fs=futures1)
        iterdata = [[3, 3], [4, 4]]
        futures2 = pw.map(simple_map_function, iterdata)
        result2 = pw.get_result(fs=futures2)
        self.assertEqual(result1, [2, 4])
        self.assertEqual(result2, [6, 8])
    def test_internal_executions(self):
        # pywren invoked from inside pywren tasks (nested executors).
        print('Testing internal executions...')
        pw = pywren.function_executor(config=CONFIG)
        pw.map(pywren_inside_pywren_map_function1, range(1, 11))
        result = pw.get_result()
        self.assertEqual(result, [0] + [list(range(i)) for i in range(2, 11)])
        pw = pywren.function_executor(config=CONFIG)
        pw.call_async(pywren_inside_pywren_map_function2, 10)
        result = pw.get_result()
        self.assertEqual(result, 10)
        pw = pywren.function_executor(config=CONFIG)
        pw.map(pywren_inside_pywren_map_function3, range(1, 11))
        result = pw.get_result()
        self.assertEqual(result, [[0, 0]] + [[list(range(i)), list(range(i))] for i in range(2, 11)])
    def test_map_reduce_cos_bucket(self):
        # Data discovery from a whole bucket prefix ('backend://bucket/prefix/').
        print('Testing map_reduce() over a COS bucket...')
        sb = STORAGE_CONFIG['backend']
        data_prefix = sb+'://'+STORAGE_CONFIG['bucket']+'/'+PREFIX+'/'
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(my_map_function_obj, data_prefix, my_reduce_function)
        result = pw.get_result()
        self.checkResult(result)
    def test_map_reduce_cos_bucket_one_reducer_per_object(self):
        print('Testing map_reduce() over a COS bucket with one reducer per object...')
        sb = STORAGE_CONFIG['backend']
        data_prefix = sb+'://'+STORAGE_CONFIG['bucket']+'/'+PREFIX+'/'
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(my_map_function_obj, data_prefix, my_reduce_function, reducer_one_per_object=True)
        result = pw.get_result()
        self.checkResult(result)
    def test_map_reduce_cos_key(self):
        # Explicit per-key iterdata instead of bucket discovery.
        print('Testing map_reduce() over COS keys...')
        sb = STORAGE_CONFIG['backend']
        bucket_name = STORAGE_CONFIG['bucket']
        iterdata = [sb+'://'+bucket_name+'/'+key for key in list_test_keys()]
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(my_map_function_obj, iterdata, my_reduce_function)
        result = pw.get_result()
        self.checkResult(result)
    def test_map_reduce_cos_key_one_reducer_per_object(self):
        print('Testing map_reduce() over COS keys with one reducer per object...')
        sb = STORAGE_CONFIG['backend']
        bucket_name = STORAGE_CONFIG['bucket']
        iterdata = [sb+'://'+bucket_name+'/'+key for key in list_test_keys()]
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(my_map_function_obj, iterdata, my_reduce_function, reducer_one_per_object=True)
        result = pw.get_result()
        self.checkResult(result)
    def test_map_reduce_url(self):
        print('Testing map_reduce() over URLs...')
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(my_map_function_url, TEST_FILES_URLS, my_reduce_function)
        result = pw.get_result()
        # +1 presumably compensates for the word in the 'result' object, which
        # is counted by the bucket/key tests but not reachable via the URL
        # list — TODO confirm against initTests().
        self.checkResult(result + 1)
    def test_storage_handler(self):
        # Functions with an 'ibm_cos' parameter get a storage client injected.
        print('Testing ibm_cos function arg...')
        iterdata = [[key, STORAGE_CONFIG['bucket']] for key in list_test_keys()]
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(my_map_function_ibm_cos, iterdata, my_reduce_function)
        result = pw.get_result()
        self.checkResult(result)
    def test_chunks_bucket(self):
        # Objects split into 1 MiB chunks, one map task per chunk.
        print('Testing cunk_size on a bucket...')
        data_prefix = STORAGE_CONFIG['bucket'] + '/' + PREFIX + '/'
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(my_map_function_obj, data_prefix, my_reduce_function, chunk_size=1*1024**2)
        result = pw.get_result()
        self.checkResult(result)
    def test_chunks_bucket_one_reducer_per_object(self):
        print('Testing cunk_size on a bucket with one reducer per object...')
        data_prefix = STORAGE_CONFIG['bucket'] + '/' + PREFIX + '/'
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(my_map_function_obj, data_prefix, my_reduce_function, chunk_size=1*1024**2,
                      reducer_one_per_object=True)
        result = pw.get_result()
        self.checkResult(result)
    def test_cloudobject(self):
        # Map stores counters as CloudObjects; reduce dereferences them.
        print('Testing cloudobjects...')
        data_prefix = STORAGE_CONFIG['bucket'] + '/' + PREFIX + '/'
        pw = pywren.function_executor(config=CONFIG)
        pw.map_reduce(my_cloudobject_put, data_prefix, my_cloudobject_get)
        result = pw.get_result()
        self.checkResult(result)
if __name__ == '__main__':
    # 'help' just lists the runnable test names and exits.
    if args.test == 'help':
        print("available test functions:")
        print("-> test_call_async")
        print("-> test_map")
        print("-> test_map_reduce")
        print("-> test_multiple_executions")
        print("-> test_internal_executions")
        print("-> test_map_reduce_cos_bucket")
        print("-> test_map_reduce_cos_bucket_one_reducer_per_object")
        print("-> test_map_reduce_cos_key")
        print("-> test_map_reduce_cos_key_one_reducer_per_object")
        print("-> test_map_reduce_url")
        print("-> test_storage_handler")
        print("-> test_chunks_bucket")
        print("-> test_chunks_bucket_one_reducer_per_object")
        print("-> test_cloudobject")
    else:
        suite = unittest.TestSuite()
        if args.test == 'all':
            suite.addTest(unittest.makeSuite(TestPywren))
        else:
            # TestPywren(name) raises ValueError for an unknown test method.
            try:
                suite.addTest(TestPywren(args.test))
            except ValueError:
                print("unknown test, use: --help")
                sys.exit()
        if args.config:
            # --config is an open file handle; replace it with the parsed JSON.
            # NOTE(review): args.config is not referenced again below — verify
            # the global CONFIG is derived from it earlier in the file.
            args.config = json.load(args.config)
        # Upload fixtures, run the selected tests, then clean the bucket.
        initTests()
        runner = unittest.TextTestRunner()
        runner.run(suite)
        cleanTests()
| 35.237374 | 147 | 0.644833 |
a78dba91e3e4f12c84e06fe3bcc4a4d2cc889af3 | 1,758 | py | Python | autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/base_tabular_preprocessing.py | LMZimmer/Auto-PyTorch_refactor | ac7a9ce35e87a428caca2ac108b362a54d3b8f3a | [
"Apache-2.0"
] | null | null | null | autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/base_tabular_preprocessing.py | LMZimmer/Auto-PyTorch_refactor | ac7a9ce35e87a428caca2ac108b362a54d3b8f3a | [
"Apache-2.0"
] | 34 | 2020-10-06T08:06:46.000Z | 2021-01-21T13:23:34.000Z | autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/base_tabular_preprocessing.py | LMZimmer/Auto-PyTorch_refactor | ac7a9ce35e87a428caca2ac108b362a54d3b8f3a | [
"Apache-2.0"
] | 1 | 2020-10-14T12:25:47.000Z | 2020-10-14T12:25:47.000Z | from typing import Dict, List, Optional
from sklearn.base import BaseEstimator
from autoPyTorch.pipeline.components.preprocessing.base_preprocessing import autoPyTorchPreprocessingComponent
class autoPyTorchTabularPreprocessingComponent(autoPyTorchPreprocessingComponent):
    """
    Provides abstract interface for preprocessing algorithms in AutoPyTorch.
    """
    _required_properties: List[str] = ['handles_sparse']

    def __init__(self) -> None:
        super().__init__()
        # One sklearn preprocessor per column kind; populated by subclass fit().
        self.preprocessor: Dict[str, Optional[BaseEstimator]] = dict(numerical=None, categorical=None)

    def get_preprocessor_dict(self) -> Dict[str, BaseEstimator]:
        """
        Returns early_preprocessor dictionary containing the sklearn numerical
        and categorical early_preprocessor with "numerical" and "categorical"
        keys. May contain None for a key if early_preprocessor does not
        handle the datatype defined by key

        Returns:
            Dict[str, BaseEstimator]: early_preprocessor dictionary

        Raises:
            AttributeError: if neither preprocessor slot has been fitted yet.
        """
        # Explicit is-None checks: the previous `(a and b) is None` form relied
        # on short-circuit evaluation and silently accepted falsy non-None
        # estimators, besides obscuring the intent.
        if self.preprocessor['numerical'] is None and self.preprocessor['categorical'] is None:
            raise AttributeError("{} can't return early_preprocessor dict without fitting first"
                                 .format(self.__class__.__name__))
        return self.preprocessor

    def __str__(self) -> str:
        """ Allow a nice understanding of what components where used """
        string = self.__class__.__name__
        # Copy: vars(self) aliases self.__dict__, so popping from it directly
        # would permanently delete the instance attributes.
        info = dict(vars(self))
        # Remove unwanted info
        info.pop('early_preprocessor', None)
        info.pop('column_transformer', None)
        info.pop('random_state', None)
        if len(info.keys()) != 0:
            string += " (" + str(info) + ")"
        return string
| 39.954545 | 110 | 0.677474 |
5245d2199315fb28b45539333f2927bbac2c8069 | 432 | py | Python | env/Lib/site-packages/plotly/validators/mesh3d/colorbar/_tickprefix.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/mesh3d/colorbar/_tickprefix.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/mesh3d/colorbar/_tickprefix.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class TickprefixValidator(_plotly_utils.basevalidators.StringValidator):
    # Auto-generated validator for the `mesh3d.colorbar.tickprefix` string
    # attribute; editing it triggers a 'colorbars' re-render by default.
    def __init__(
        self, plotly_name="tickprefix", parent_name="mesh3d.colorbar", **kwargs
    ):
        super(TickprefixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs
        )
| 30.857143 | 79 | 0.666667 |
23481b8343288aad7dd3acc8cda728928e537604 | 1,578 | py | Python | zhihu_spider/pipelines.py | Moonwly/zhihu_spider | 609e78f3f68045697456a59c9d6867ee0b1a9c99 | [
"WTFPL"
] | null | null | null | zhihu_spider/pipelines.py | Moonwly/zhihu_spider | 609e78f3f68045697456a59c9d6867ee0b1a9c99 | [
"WTFPL"
] | null | null | null | zhihu_spider/pipelines.py | Moonwly/zhihu_spider | 609e78f3f68045697456a59c9d6867ee0b1a9c99 | [
"WTFPL"
] | 1 | 2020-03-01T11:30:51.000Z | 2020-03-01T11:30:51.000Z | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
import pymysql
from zhihu_spider.misc.all_secret_set import mysql_config
import logging
from zhihu_spider.misc.mysql_pool import ConnectionPool
from zhihu_spider.items import *
from scrapy.exceptions import DropItem
from zhihu_spider.misc.tools import spelling_insert_sql, hump2underline
# Item classes the pipeline knows how to persist; each class name, converted
# by hump2underline(), doubles as the target MySQL table name.
item_class_list = [
    UserInfo,
    Question,
    Answer,
    Article,
    CollectAnswer,
    AgreeAnswer,
    FollowQuestion,
    CollectQuestion,
    AgreeArticle,
    CollectArticle
]
class ZhihuSpiderPipeLine(object):
    """Persist scraped items into MySQL, one table per item class."""

    def __init__(self):
        # Pool is created once; a single pooled connection is kept for the
        # lifetime of the spider.
        pool = ConnectionPool(size=20, name='pool', **mysql_config)
        self.connections = pool.get_connection()

    def process_item(self, item, spider):
        """Route *item* to the table matching its class and persist it."""
        for item_class in item_class_list:
            if isinstance(item, item_class):
                self.save_item(item, hump2underline(item_class.__name__))
                break
        # Fix: Scrapy pipelines must return the item (or raise DropItem) so
        # that later pipelines in the chain still receive it.
        return item

    def save_item(self, item, table_name):
        """Insert *item* into *table_name*.

        MySQL errors are logged and rolled back (reconnecting if the
        connection went stale); any other failure drops the item.
        NOTE(review): no explicit commit here — assumes autocommit is enabled
        via mysql_config; verify.
        """
        sql = spelling_insert_sql(item.keys(), table_name)
        try:
            with self.connections.cursor() as cursor:
                cursor.execute(sql, dict(item))
        except pymysql.err.MySQLError as e:
            logging.error(e)
            logging.warning("error item %s", item.__class__.__name__)
            # ping(reconnect=True) re-establishes a dropped connection before
            # rolling back the failed transaction.
            self.connections.ping(reconnect=True)
            self.connections.rollback()
        except Exception as e:
            logging.error(e)
            raise DropItem('item exception', sql)

    def close_spider(self, spider):
        """Release the pooled connection when the spider shuts down."""
        self.connections.close()
| 27.684211 | 73 | 0.666033 |
07ed80658a549fe138acf35c8935bfe6ece0a233 | 3,638 | py | Python | orp/rendering.py | Outbreak-Team/outbreak-rp-renderer | 05036182c4b54b1011e643e0dcbcb82253b23667 | [
"MIT"
] | null | null | null | orp/rendering.py | Outbreak-Team/outbreak-rp-renderer | 05036182c4b54b1011e643e0dcbcb82253b23667 | [
"MIT"
] | null | null | null | orp/rendering.py | Outbreak-Team/outbreak-rp-renderer | 05036182c4b54b1011e643e0dcbcb82253b23667 | [
"MIT"
] | null | null | null | import argparse
import os
import sys
import bpy
"""
blender test.blend -b -P rendering.py -- --r 512 --m normalmap --o ./normalmap.png
blender test.blend -b -P rendering.py -- --r 512 --m heightmap --o ./heightmap.png
"""
dirpath = os.path.dirname(os.path.abspath(__file__))
def render_heightmap(img_resolution: int, out_path: str) -> str:
    """Render the scene's height map to *out_path* using Eevee.

    Activates the 'heightmap_out' Material Output node on every object in the
    'objects_for_baking' collection, hides the bake screen and renders a
    square grayscale image of img_resolution pixels. Returns out_path.
    """
    bpy.context.window.scene = bpy.data.scenes["Scene"]
    bpy.ops.object.select_all(action='DESELECT')
    # Objects whose volume is being baked
    objs_collection = bpy.data.collections["objects_for_baking"].all_objects
    # Activate the required Material Output node on every object's materials
    for obj in objs_collection:
        for mat_slot in obj.material_slots:
            mat = mat_slot.material
            # Activate the Material Output used for the height map
            mat.node_tree.nodes.active = mat.node_tree.nodes["heightmap_out"]
    bpy.context.scene.render.filepath = out_path
    bpy.context.scene.render.engine = 'BLENDER_EEVEE'
    # 'Standard' view transform avoids Filmic tone-mapping of the data image
    bpy.context.scene.view_settings.view_transform = 'Standard'
    bpy.context.scene.render.image_settings.color_mode = 'BW'
    bpy.context.scene.render.resolution_y = img_resolution
    bpy.context.scene.render.resolution_x = img_resolution
    # The bake screen must not occlude the objects in this render
    bpy.data.objects["bakescreen"].hide_render = True
    bpy.ops.render.render('INVOKE_DEFAULT', write_still=True)
    return out_path
def bake_normalmap(img_resolution: int, out_path: str) -> str:
    """Bake a normal map in Cycles' selected-to-active mode.

    Assumes an object named ``bakescreen`` (with a material of the same name)
    exists; the normals of every object in the ``objects_for_baking``
    collection are baked onto it and the image saved to *out_path*.
    Returns out_path.
    """
    bpy.context.window.scene = bpy.data.scenes["Scene"]
    bpy.ops.object.select_all(action='DESELECT')
    bpy.context.scene.render.engine = 'CYCLES'
    # The object we bake onto
    bakescreen_obj = bpy.data.objects["bakescreen"]
    # Objects whose volume is being baked
    objs_collection = bpy.data.collections["objects_for_baking"].all_objects
    # Select the source objects and activate the required Material Outputs
    for obj in objs_collection:
        obj.select_set(True)
        for mat_slot in obj.material_slots:
            mat = mat_slot.material
            # Activate the Material Output used for the normal map
            mat.node_tree.nodes.active = mat.node_tree.nodes["normalmap_out"]
    # Source objects are selected (above); make bakescreen the active object
    bakescreen_obj.select_set(True)
    bpy.context.view_layer.objects.active = bakescreen_obj
    # Create the image the bake result will be written into
    bake_img = bpy.data.images.new('bake', img_resolution, img_resolution)
    # Create an image texture node on bakescreen's material and make it active
    nodes = bakescreen_obj.material_slots[0].material.node_tree.nodes
    texture_node = nodes.new('ShaderNodeTexImage')
    texture_node.select = True
    nodes.active = texture_node
    texture_node.image = bake_img
    bpy.context.scene.render.image_settings.color_mode = 'RGB'
    bpy.context.scene.render.bake.use_selected_to_active = True
    bpy.ops.object.bake(type='NORMAL', save_mode='EXTERNAL')
    bake_img.save_render(filepath=out_path)
    return out_path
# CLI for running inside Blender: only arguments after Blender's own `--`
# separator are passed to this script (see the usage examples above).
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", "--r", help="output image side in pixels")
parser.add_argument("--out", "--o", help="Output file path")
parser.add_argument("--map", "--m", help="heightmap or normalmap")
args = parser.parse_args(sys.argv[sys.argv.index("--")+1:])
# Dispatch: anything other than "heightmap" falls through to the normal map.
if args.map == "heightmap":
    render_heightmap(int(args.resolution), os.path.abspath(args.out))
else:
    bake_normalmap(int(args.resolution), os.path.abspath(args.out))
| 37.122449 | 86 | 0.717977 |
ad6751ffdf2a32e8cebd339dd64d9bada4a28648 | 2,059 | py | Python | src/tsadmsite/tests/test.py | tsadm/webapp | 85056841fbaa06de18844630977b163a6a999e8a | [
"BSD-3-Clause"
] | null | null | null | src/tsadmsite/tests/test.py | tsadm/webapp | 85056841fbaa06de18844630977b163a6a999e8a | [
"BSD-3-Clause"
] | null | null | null | src/tsadmsite/tests/test.py | tsadm/webapp | 85056841fbaa06de18844630977b163a6a999e8a | [
"BSD-3-Clause"
] | null | null | null | from tsadm.tests import TSAdmTestBase
from ..models import SiteDB, SiteEnvDB, SiteEnvACL
from tsadmhost.models import HostDB
class TSAdmSiteTest(TSAdmTestBase):
    """View tests for the site app, using the fixture sites s0/s1/s2.

    Per the fixtures: s0 has envs dev+test, s1 has env test plus a prod env
    the test user cannot access, and s2 has no envs at all.
    """
    site = None  # populated per-test in setUp with the s0 fixture
    def setUp(self):
        super(TSAdmSiteTest, self).setUp()
        self.site = SiteDB.objects.get(name='s0')
    def test_Site(self):
        self.assertEqual(self.site.id, 1)
    def test_HomeView(self):
        # Home lists only the sites visible to the test user (s0, s1).
        resp = self.client.get(self.getURL('home'))
        self.assertContains(resp, 'TEST:site:s0', count=1, status_code=200)
        self.assertContains(resp, 'TEST:site:s1', count=1, status_code=200)
        self.assertNotContains(resp, 'TEST:site:s2', status_code=200)
    def test_SiteView(self):
        # The site page lists only the envs the user may access.
        resp = self.client.get(self.getURL('site:home', kwargs={'name': 's0'}))
        self.assertContains(resp, 'TEST:site.env:dev', count=1, status_code=200)
        self.assertContains(resp, 'TEST:site.env:test', count=1, status_code=200)
        resp = self.client.get(self.getURL('site:home', kwargs={'name': 's1'}))
        self.assertContains(resp, 'TEST:site.env:test', count=1, status_code=200)
        self.assertNotContains(resp, 'TEST:site.env:prod', status_code=200)
    def test_SiteViewNotFound(self):
        # Unknown site names answer 400, not 404.
        resp = self.client.get(
            self.getURL('site:home', kwargs={'name': 'INVALID'}),
        )
        self.assertEqual(resp.status_code, 400)
    def test_SiteViewNoEnvs(self):
        # A site without environments is also rejected with 400.
        resp = self.client.get(
            self.getURL('site:home', kwargs={'name': 's2'}),
        )
        self.assertEqual(resp.status_code, 400)
    def test_SiteEnvView(self):
        resp = self.client.get(self.getURL('site:env', kwargs={'site': 's0', 'env': 'dev'}))
        self.assertContains(resp, 'TEST:site.name:s0', count=1, status_code=200)
        self.assertContains(resp, 'TEST:site.env:dev', count=1, status_code=200)
    def test_SiteEnvViewNoAccess(self):
        # An env the user has no ACL for is rejected with 400.
        resp = self.client.get(
            self.getURL('site:env', kwargs={'site': 's1', 'env': 'prod'}),
        )
        self.assertEqual(resp.status_code, 400)
| 34.898305 | 92 | 0.641088 |
f7c5d2891ba6f76f5a96a24e946389d67fd215aa | 6,437 | py | Python | wolf/data/image.py | andrecianflone/wolf | 826bbedc58d4d29871110349356868066a3108e6 | [
"Apache-2.0"
] | 75 | 2020-03-31T22:21:04.000Z | 2022-03-20T10:58:17.000Z | wolf/data/image.py | andrecianflone/wolf | 826bbedc58d4d29871110349356868066a3108e6 | [
"Apache-2.0"
] | 3 | 2021-02-03T07:07:14.000Z | 2022-03-08T20:58:43.000Z | wolf/data/image.py | andrecianflone/wolf | 826bbedc58d4d29871110349356868066a3108e6 | [
"Apache-2.0"
] | 10 | 2020-04-27T05:31:44.000Z | 2021-11-21T14:11:16.000Z | import os
import scipy.io
import numpy as np
import torch
from torchvision import datasets, transforms
def load_datasets(dataset, image_size, data_path):
    """Dispatch to the loader for *dataset*; returns a (train, eval) pair.

    'lsun' may carry a category suffix, e.g. 'lsun_bedroom'.
    Raises ValueError for unknown dataset names.
    """
    if dataset == 'omniglot':
        return load_omniglot()
    if dataset == 'mnist':
        return load_mnist()
    if dataset.startswith('lsun'):
        # 'lsun' alone means all categories; 'lsun_<cat>' selects one.
        category = None if dataset == 'lsun' else dataset[5:]
        return load_lsun(data_path, category, image_size)
    if dataset == 'cifar10':
        return load_cifar10(data_path)
    if dataset == 'imagenet':
        return load_imagenet(data_path, image_size)
    if dataset == 'celeba':
        return load_celeba(data_path, image_size)
    raise ValueError('unknown data set %s' % dataset)
def load_omniglot():
    """Load Omniglot from data/omniglot/chardata.mat.

    Returns (train, test), each a list of (image_tensor[1,28,28], label) pairs.
    """
    def _images(flat):
        # Columns are flattened images; reshape to NCHW with one channel.
        return flat.T.reshape((-1, 1, 28, 28))

    raw = scipy.io.loadmat('data/omniglot/chardata.mat')
    splits = []
    for data_key, target_key in (('data', 'target'), ('testdata', 'testtarget')):
        images = torch.from_numpy(_images(raw[data_key]).astype(np.float32)).float()
        # Targets are one-hot along axis 0; argmax recovers the class index.
        labels = torch.from_numpy(raw[target_key].argmax(axis=0)).long()
        splits.append([(images[i], labels[i]) for i in range(len(images))])
    return splits[0], splits[1]
def load_mnist():
    """Load preprocessed MNIST tensors.

    Returns (train, test), each a list of (image_tensor[1,28,28], label) pairs
    with pixel values scaled to [0, 1).
    """
    def _to_pairs(raw, labels):
        imgs = raw.float().div(256).unsqueeze(1)
        return [(imgs[i], labels[i]) for i in range(len(imgs))]

    train_data, train_label = torch.load('data/mnist/processed/training.pt')
    test_data, test_label = torch.load('data/mnist/processed/test.pt')
    return _to_pairs(train_data, train_label), _to_pairs(test_data, test_label)
def load_lsun(data_path, category, image_size):
    """Build LSUN train/val datasets, center-cropped to 256 then resized.

    *category* of None loads the full 'train'/'val' splits; otherwise the
    per-category '<category>_train'/'<category>_val' splits.
    """
    transform = transforms.Compose([
        transforms.CenterCrop(256),
        transforms.Resize(image_size),
        transforms.ToTensor(),
    ])
    if category is None:
        classes_train, classes_val = 'train', 'val'
    else:
        classes_train = [category + '_train']
        classes_val = [category + '_val']
    train_data = datasets.LSUN(data_path, classes=classes_train, transform=transform)
    val_data = datasets.LSUN(data_path, classes=classes_val, transform=transform)
    return train_data, val_data
def load_cifar10(data_path):
    """CIFAR-10 datasets; training adds reflect-pad/random-crop/flip augmentation."""
    image_side = 32
    train_transform = transforms.Compose([
        transforms.Pad(4, padding_mode='reflect'),
        transforms.RandomCrop(image_side),
        transforms.RandomHorizontalFlip(0.5),
        transforms.ToTensor()
    ])
    train_data = datasets.CIFAR10(data_path, train=True, download=True,
                                  transform=train_transform)
    test_data = datasets.CIFAR10(data_path, train=False,
                                 transform=transforms.Compose([transforms.ToTensor()]))
    return train_data, test_data
def load_imagenet(data_path, image_size):
    """ImageFolder datasets from a pre-resized imagenet{N}x{N} directory tree."""
    root = os.path.join(data_path, 'imagenet{}x{}'.format(image_size, image_size))
    to_tensor = transforms.Compose([transforms.ToTensor()])
    train_data = datasets.ImageFolder(os.path.join(root, 'train'), transform=to_tensor)
    val_data = datasets.ImageFolder(os.path.join(root, 'val'), transform=to_tensor)
    return train_data, val_data
def load_celeba(data_path, image_size):
    """CelebA ImageFolder datasets; the training split adds random horizontal flips."""
    train_transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.RandomHorizontalFlip(0.5),
        transforms.ToTensor()
    ])
    val_transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor()
    ])
    train_data = datasets.ImageFolder(os.path.join(data_path, 'train'),
                                      transform=train_transform)
    val_data = datasets.ImageFolder(os.path.join(data_path, 'val'),
                                    transform=val_transform)
    return train_data, val_data
def get_batch(data, indices):
    """Gather the (image, label) pairs at *indices* into stacked batch tensors.

    Returns (images[B,...], labels[B]) with labels as a LongTensor.
    """
    pairs = [data[index] for index in indices]
    imgs = torch.stack([img for img, _ in pairs], dim=0)
    labels = torch.LongTensor([label for _, label in pairs])
    return imgs, labels
def iterate_minibatches(data, indices, batch_size, shuffle):
    """Yield (images, labels) batches over *indices*.

    When *shuffle* is true, *indices* is shuffled in place first; the final
    batch may be smaller than batch_size.
    """
    if shuffle:
        np.random.shuffle(indices)
    for start in range(0, len(indices), batch_size):
        yield get_batch(data, indices[start:start + batch_size])
def binarize_image(img):
    """Stochastically binarize: each pixel becomes 1 with probability equal to its value."""
    threshold = torch.rand(img.size()).type_as(img)
    return threshold.le(img).float()
def binarize_data(data):
    # Stochastically binarize every image, keeping labels untouched.
    return [(binarize_image(img), label) for img, label in data]
def preprocess(img, n_bits, noise=None):
    """Map an image in [0, 1] to the model's [-1, 1] range at n_bits precision.

    When *noise* is given, the image gains a sample dimension
    ([batch, nsamples, C, H, W]) and the noise is added for dequantization.
    """
    n_bins = 2. ** n_bits
    scaled = img.mul(255)
    if n_bits < 8:
        # Quantize down to n_bins intensity levels.
        scaled = torch.floor(scaled.div(256. / n_bins))
    if noise is not None:
        # [batch, nsamples, channels, H, W]
        scaled = scaled.unsqueeze(1) + noise
    normalized = scaled.div(n_bins)
    return (normalized - 0.5).div(0.5)
def postprocess(img, n_bits):
    """Invert preprocess(): map model output in [-1, 1] back to an image in [0, 1]."""
    n_bins = 2. ** n_bits
    # Undo normalization, then quantize back to n_bins levels on the 0-255 scale.
    levels = img.mul(0.5).add(0.5).mul(n_bins)
    pixels = torch.floor(levels).mul(256. / n_bins)
    return pixels.clamp(0, 255).div(255)
| 36.162921 | 87 | 0.553985 |
1e56e35e32129ec909adf4afd614f3da0e7e5f39 | 325 | py | Python | pytdx/errors.py | AtlantixJJ/vnpy | 28992c7d5391f6dd42a14b481d01ceafde048b5f | [
"MIT"
] | 13 | 2019-06-07T04:34:09.000Z | 2022-03-21T07:46:01.000Z | pytdx/errors.py | AtlantixJJ/vnpy | 28992c7d5391f6dd42a14b481d01ceafde048b5f | [
"MIT"
] | 1 | 2020-04-21T02:42:32.000Z | 2020-04-21T02:42:32.000Z | venv/lib/python3.7/site-packages/pytdx/errors.py | CatTiger/vnpy | 7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b | [
"MIT"
] | 2 | 2021-07-08T03:44:41.000Z | 2021-09-15T00:41:19.000Z | # coding=utf-8
class TdxConnectionError(Exception):
    """
    Raised when connecting to the server fails.
    """
    pass
class TdxFunctionCallError(Exception):
    """
    Raised when an API function call fails.
    """
    def __init__(self, *args, **kwargs):
        super(TdxFunctionCallError, self).__init__(*args, **kwargs)
        # The underlying exception that triggered this error, if any;
        # filled in by the caller after construction.
        self.original_exception = None
| 15.47619 | 67 | 0.630769 |
80372404eab16a83e00938c5fac9bccd3eafe7d4 | 5,239 | py | Python | sdk/python/pulumi_azure_native/compute/v20190301/get_gallery.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20190301/get_gallery.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20190301/get_gallery.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetGalleryResult',
'AwaitableGetGalleryResult',
'get_gallery',
]
@pulumi.output_type
class GetGalleryResult:
    """
    Specifies information about the Shared Image Gallery that you want to create or update.
    """
    # Auto-generated output type: each constructor argument is type-checked
    # and stored via pulumi.set, then exposed read-only through the getters.
    def __init__(__self__, description=None, id=None, identifier=None, location=None, name=None, provisioning_state=None, tags=None, type=None):
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identifier and not isinstance(identifier, dict):
            raise TypeError("Expected argument 'identifier' to be a dict")
        pulumi.set(__self__, "identifier", identifier)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The description of this Shared Image Gallery resource. This property is updatable.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def identifier(self) -> Optional['outputs.GalleryIdentifierResponse']:
        """
        Describes the gallery unique name.
        """
        return pulumi.get(self, "identifier")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state, which only appears in the response.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type
        """
        return pulumi.get(self, "type")
class AwaitableGetGalleryResult(GetGalleryResult):
    # Makes the result awaitable so it works in both sync and async Pulumi
    # programs; __await__ yields nothing and returns a plain copy of the data.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetGalleryResult(
            description=self.description,
            id=self.id,
            identifier=self.identifier,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            tags=self.tags,
            type=self.type)
def get_gallery(gallery_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGalleryResult:
    """
    Specifies information about the Shared Image Gallery that you want to create or update.


    :param str gallery_name: The name of the Shared Image Gallery.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['galleryName'] = gallery_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the typed result is re-wrapped into the
    # awaitable variant for async callers.
    __ret__ = pulumi.runtime.invoke('azure-native:compute/v20190301:getGallery', __args__, opts=opts, typ=GetGalleryResult).value
    return AwaitableGetGalleryResult(
        description=__ret__.description,
        id=__ret__.id,
        identifier=__ret__.identifier,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        tags=__ret__.tags,
        type=__ret__.type)
| 33.158228 | 144 | 0.636763 |
65e42f35d4b704cd0a6ce67b66d5623716e0bdb5 | 10,930 | py | Python | argparser.py | davidegariglio/MiB | a4e8cb487073090b360e98e43ee339aedeb24815 | [
"MIT"
] | 3 | 2021-07-15T19:02:49.000Z | 2021-12-11T14:39:49.000Z | argparser.py | farzadips/MiB | a4e8cb487073090b360e98e43ee339aedeb24815 | [
"MIT"
] | null | null | null | argparser.py | farzadips/MiB | a4e8cb487073090b360e98e43ee339aedeb24815 | [
"MIT"
] | 2 | 2021-06-01T15:22:06.000Z | 2021-11-28T14:02:47.000Z | import argparse
import tasks
def modify_command_options(opts):
    """Derive dependent options from the parsed command line.

    Fills in dataset-specific class counts, zeroes the sample count when
    visualization is off, and expands the --method shorthand into the
    concrete hyper-parameters it stands for. Returns the mutated namespace.
    """
    if opts.dataset == 'voc':
        opts.num_classes = 21
    elif opts.dataset == 'ade':
        opts.num_classes = 150
    if not opts.visualize:
        opts.sample_num = 0
    if opts.method is not None:
        if opts.method == 'FT':
            pass  # plain fine-tuning: no extra hyper-parameters
        elif opts.method == 'LWF':
            opts.loss_kd = 100
        elif opts.method == 'LWF-MC':
            opts.icarl = True
            opts.icarl_importance = 10
        elif opts.method == 'ILT':
            opts.loss_kd = 100
            opts.loss_de = 100
        elif opts.method == 'EWC':
            opts.regularizer = "ewc"
            opts.reg_importance = 500
        elif opts.method == 'RW':
            opts.regularizer = "rw"
            opts.reg_importance = 100
        elif opts.method == 'PI':
            opts.regularizer = "pi"
            opts.reg_importance = 500
        elif opts.method == 'MiB':
            opts.loss_kd = 10
            opts.unce = True
            opts.unkd = True
            opts.init_balanced = True
        opts.no_overlap = not opts.overlap
        opts.no_cross_val = not opts.cross_val
        opts.name = opts.method
    return opts
def get_argparser():
    """
    Build the command-line argument parser for training/testing.

    Returns the configured :class:`argparse.ArgumentParser`. The parsed
    options are expected to be post-processed by ``modify_command_options``.
    """
    parser = argparse.ArgumentParser()

    # Performance Options
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--random_seed", type=int, default=44,
                        help="random seed (default: 44)")
    parser.add_argument("--num_workers", type=int, default=8,
                        help='number of workers (default: 8)')

    # Dataset Options
    parser.add_argument("--data_root", type=str, default='data',
                        help="path to Dataset")
    parser.add_argument("--dataset", type=str, default='voc',
                        choices=['voc', 'ade'], help='Name of dataset')
    parser.add_argument("--num_classes", type=int, default=None,
                        help="num classes (default: None)")

    # Method Options
    # BE CAREFUL USING THIS, THEY WILL OVERRIDE ALL THE OTHER PARAMETERS.
    parser.add_argument("--method", type=str, default=None,
                        choices=['FT', 'LWF', 'LWF-MC', 'ILT', 'EWC', 'RW', 'PI', 'MiB'],
                        help="The method you want to use. BE CAREFUL USING THIS, IT MAY OVERRIDE OTHER PARAMETERS.")

    # Train Options
    parser.add_argument("--epochs", type=int, default=30,
                        help="epoch number (default: 30)")
    parser.add_argument("--fix_bn", action='store_true', default=False,
                        help='fix batch normalization during training (default: False)')
    parser.add_argument("--batch_size", type=int, default=32,
                        help='batch size (default: 32)')
    parser.add_argument("--crop_size", type=int, default=320,
                        help="crop size (default: 320)")
    parser.add_argument("--lr", type=float, default=0.007,
                        help="learning rate (default: 0.007)")
    parser.add_argument("--momentum", type=float, default=0.9,
                        help='momentum for SGD (default: 0.9)')
    parser.add_argument("--weight_decay", type=float, default=1e-4,
                        help='weight decay (default: 1e-4)')
    parser.add_argument("--lr_policy", type=str, default='poly',
                        choices=['poly', 'step'], help="lr schedule policy (default: poly)")
    parser.add_argument("--lr_decay_step", type=int, default=5000,
                        help="decay step for stepLR (default: 5000)")
    parser.add_argument("--lr_decay_factor", type=float, default=0.1,
                        help="decay factor for stepLR (default: 0.1)")
    parser.add_argument("--lr_power", type=float, default=0.9,
                        help="power for polyLR (default: 0.9)")
    parser.add_argument("--bce", default=False, action='store_true',
                        help="Whether to use BCE or not (default: no)")

    # Validation Options
    parser.add_argument("--val_on_trainset", action='store_true', default=False,
                        help="enable validation on train set (default: False)")
    parser.add_argument("--cross_val", action='store_true', default=False,
                        help="If validate on training or on validation (default: Train)")
    # NOTE: store_false -> passing --crop_val DISABLES cropping.
    parser.add_argument("--crop_val", action='store_false', default=True,
                        help='do crop for validation (default: True)')

    # Logging Options
    parser.add_argument("--logdir", type=str, default='./logs',
                        help="path to Log directory (default: ./logs)")
    parser.add_argument("--name", type=str, default='Experiment',
                        help="name of the experiment - to append to log directory (default: Experiment)")
    parser.add_argument("--sample_num", type=int, default=0,
                        help='number of samples for visualization (default: 0)')
    parser.add_argument("--debug", action='store_true', default=False,
                        help="verbose option")
    # NOTE: store_false -> passing --visualize DISABLES visualization.
    parser.add_argument("--visualize", action='store_false', default=True,
                        help="visualization on tensorboard (def: Yes)")
    parser.add_argument("--print_interval", type=int, default=90,
                        help="print interval of loss (default: 90)")
    parser.add_argument("--val_interval", type=int, default=2,
                        help="epoch interval for eval (default: 2)")
    parser.add_argument("--ckpt_interval", type=int, default=1,
                        help="epoch interval for saving model (default: 1)")

    # Model Options
    parser.add_argument("--backbone", type=str, default='resnet50',
                        choices=['resnet50', 'resnet101'], help='backbone for the body (def: resnet50)')
    parser.add_argument("--output_stride", type=int, default=16,
                        choices=[8, 16], help='stride for the backbone (def: 16)')
    # NOTE(review): store_true with default=True makes this flag a no-op;
    # presumably it should be default=False -- confirm before relying on it.
    parser.add_argument("--no_pretrained", action='store_true', default=True,
                        help='Whether to use pretrained or not (def: True)')
    parser.add_argument("--norm_act", type=str, default="iabn_sync",
                        choices=['iabn_sync', 'iabn', 'abn', 'std'], help='Which BN to use (def: iabn_sync)')
    parser.add_argument("--fusion-mode", metavar="NAME", type=str, choices=["mean", "voting", "max"], default="mean",
                        help="How to fuse the outputs. Options: 'mean', 'voting', 'max'")
    parser.add_argument("--pooling", type=int, default=32,
                        help='pooling in ASPP for the validation phase (def: 32)')

    # Test and Checkpoint options
    parser.add_argument("--test", action='store_true', default=False,
                        help="Whether to train or test only (def: train and test)")
    parser.add_argument("--ckpt", default=None, type=str,
                        help="path to trained model. Leave it None if you want to retrain your model")

    # Parameters for Knowledge Distillation of ILTSS (https://arxiv.org/abs/1907.13372)
    parser.add_argument("--freeze", action='store_true', default=False,
                        help="Use this to freeze the feature extractor in incremental steps")
    parser.add_argument("--loss_de", type=float, default=0.,  # Distillation on Encoder
                        help="Set this hyperparameter to a value greater than "
                             "0 to enable distillation on Encoder (L2)")
    parser.add_argument("--loss_kd", type=float, default=0.,  # Distillation on Output
                        help="Set this hyperparameter to a value greater than "
                             "0 to enable Knowledge Distillation (Soft-CrossEntropy)")

    # Parameters for EWC, RW, and SI (from Riemannian Walks https://arxiv.org/abs/1801.10112)
    parser.add_argument("--regularizer", default=None, type=str, choices=['ewc', 'rw', 'pi'],
                        help="regularizer you want to use. Default is None")
    parser.add_argument("--reg_importance", type=float, default=1.,
                        help="set this par to a value greater than 0 to enable regularization")
    parser.add_argument("--reg_alpha", type=float, default=0.9,
                        help="Hyperparameter for RW and EWC that controls the update of Fisher Matrix")
    parser.add_argument("--reg_no_normalize", action='store_true', default=False,
                        help="If EWC, RW, PI must be normalized or not")
    parser.add_argument("--reg_iterations", type=int, default=10,
                        help="If RW, the number of iterations after each the update of the score is done")

    # Arguments for ICaRL (from https://arxiv.org/abs/1611.07725)
    parser.add_argument("--icarl", default=False, action='store_true',
                        help="If enable ICaRL or not (def is not)")
    parser.add_argument("--icarl_importance", type=float, default=1.,
                        help="the regularization importance in ICaRL (def is 1.)")
    parser.add_argument("--icarl_disjoint", action='store_true', default=False,
                        help="Which version of icarl is to use (def: combined)")
    parser.add_argument("--icarl_bkg", action='store_true', default=False,
                        help="If use background from GT (def: No)")

    # METHODS
    parser.add_argument("--init_balanced", default=False, action='store_true',
                        help="Enable Background-based initialization for new classes")
    parser.add_argument("--unkd", default=False, action='store_true',
                        help="Enable Unbiased Knowledge Distillation instead of Knowledge Distillation")
    parser.add_argument("--alpha", default=1., type=float,
                        help="The parameter to hard-ify the soft-labels. Def is 1.")
    parser.add_argument("--unce", default=False, action='store_true',
                        help="Enable Unbiased Cross Entropy instead of CrossEntropy")

    # Incremental parameters
    parser.add_argument("--task", type=str, default="19-1", choices=tasks.get_task_list(),
                        help="Task to be executed (default: 19-1)")
    parser.add_argument("--step", type=int, default=0,
                        help="The incremental step in execution (default: 0)")
    parser.add_argument("--no_mask", action='store_true', default=False,
                        help="Use this to not mask the old classes in new training set")
    parser.add_argument("--overlap", action='store_true', default=False,
                        help="Use this to not use the new classes in the old training set")
    parser.add_argument("--step_ckpt", default=None, type=str,
                        help="path to trained model at previous step. Leave it None if you want to use def path")

    # Mixed-precision optimization level (NVIDIA apex convention).
    parser.add_argument('--opt_level', type=str, choices=['O0', 'O1', 'O2', 'O3'], default='O0')

    return parser
| 54.108911 | 117 | 0.597347 |
27631f6471b19c235d7c8a60512712d0cfb8173d | 430 | py | Python | app/core/migrations/0006_recipe_image.py | miguelmestre/recipe-app-api | 00ae7e4475b827e1643a6af8e15ea4cb1e8da4fd | [
"MIT"
] | null | null | null | app/core/migrations/0006_recipe_image.py | miguelmestre/recipe-app-api | 00ae7e4475b827e1643a6af8e15ea4cb1e8da4fd | [
"MIT"
] | null | null | null | app/core/migrations/0006_recipe_image.py | miguelmestre/recipe-app-api | 00ae7e4475b827e1643a6af8e15ea4cb1e8da4fd | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-02 21:42
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``image`` field to the ``Recipe`` model."""

    # Must run after the migration that created the Recipe model.
    dependencies = [
        ('core', '0005_recipe'),
    ]

    operations = [
        migrations.AddField(
            model_name='recipe',
            name='image',
            # Nullable so existing recipes without an image stay valid;
            # upload destinations are computed by recipe_image_file_path.
            field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
        ),
    ]
| 21.5 | 93 | 0.62093 |
2609328d7f8665a09e46f7b965420c5b941928f1 | 585 | py | Python | iommi/style_font_awesome_4.py | ara4711/iommi | e92ea7ca6c0a084f5385009a393f6c6bf5952d55 | [
"BSD-3-Clause"
] | null | null | null | iommi/style_font_awesome_4.py | ara4711/iommi | e92ea7ca6c0a084f5385009a393f6c6bf5952d55 | [
"BSD-3-Clause"
] | null | null | null | iommi/style_font_awesome_4.py | ara4711/iommi | e92ea7ca6c0a084f5385009a393f6c6bf5952d55 | [
"BSD-3-Clause"
] | null | null | null | from iommi._web_compat import mark_safe
from iommi.style import Style
from iommi.fragment import html
# Style that renders icons with Font Awesome 4: it pulls the stylesheet
# from a CDN and configures column shortcuts to emit "fa fa-<name>" classes.
font_awesome_4 = Style(
    assets__icons=html.link(
        attrs__rel="stylesheet",
        attrs__href="https://maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css",
    ),
    Column__shortcuts=dict(
        icon__extra=dict(
            # Base CSS classes applied to every icon element.
            icon_attrs__class={'fa': True, 'fa-lg': True},
            # Icon names below are given without this prefix.
            icon_prefix='fa-',
        ),
        edit__extra__icon='pencil-square-o',
        delete__extra__icon='trash-o',
        download__extra__icon='download',
    ),
)
| 29.25 | 98 | 0.652991 |
79bc572d57d832c45c46459e09b5e5896a0f2437 | 2,600 | py | Python | awx/conf/models.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | null | null | null | awx/conf/models.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | 2 | 2022-02-10T11:57:21.000Z | 2022-02-27T22:43:44.000Z | awx/conf/models.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Python
import json
# Django
from django.db import models
# AWX
from awx.main.fields import JSONBlob
from awx.main.models.base import CreatedModifiedModel, prevent_search
from awx.main.utils import encrypt_field
from awx.conf import settings_registry
__all__ = ['Setting']
class Setting(CreatedModifiedModel):
    """
    A single (optionally per-user) configuration setting.

    ``value`` is stored as JSON; settings registered as encrypted in the
    settings registry are stored encrypted at rest (see ``save``).
    """

    key = models.CharField(max_length=255)
    value = JSONBlob(null=True)
    user = prevent_search(models.ForeignKey('auth.User', related_name='settings', default=None, null=True, editable=False, on_delete=models.CASCADE))

    def __str__(self):
        """Return a human-readable ``key = value`` representation."""
        try:
            json_value = json.dumps(self.value)
        except (TypeError, ValueError):
            # json.dumps raises TypeError for non-serializable objects and
            # ValueError for circular references; never let __str__ crash.
            json_value = u'<Invalid JSON>'
        if self.user:
            return u'{} ({}) = {}'.format(self.key, self.user, json_value)
        else:
            return u'{} = {}'.format(self.key, json_value)

    def save(self, *args, **kwargs):
        """
        Save the setting, transparently encrypting ``value`` for settings
        that are registered as encrypted.
        """
        encrypted = settings_registry.is_setting_encrypted(self.key)
        new_instance = not bool(self.pk)

        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])

        # When first saving to the database, don't store any encrypted field
        # value, but instead save it until after the instance is created.
        # (Presumably encrypt_field depends on the saved instance's state --
        # confirm against encrypt_field's implementation.)
        # Otherwise, store encrypted value to the database.
        if encrypted:
            if new_instance:
                self._saved_value = self.value
                self.value = ''
            else:
                self.value = encrypt_field(self, 'value')
                if 'value' not in update_fields:
                    update_fields.append('value')

        super(Setting, self).save(*args, **kwargs)

        # After saving a new instance for the first time, set the encrypted
        # field and save again.
        if encrypted and new_instance:
            from awx.main.signals import disable_activity_stream
            with disable_activity_stream():
                self.value = self._saved_value
                self.save(update_fields=['value'])

    @classmethod
    def get_cache_key(cls, key):
        """Return the cache key under which this setting's value is cached."""
        return key

    @classmethod
    def get_cache_id_key(cls, key):
        """Return the cache key under which this setting's primary key is cached."""
        return '{}_ID'.format(key)
import awx.conf.signals # noqa
from awx.main.registrar import activity_stream_registrar # noqa
activity_stream_registrar.connect(Setting)
import awx.conf.access # noqa
| 32.911392 | 149 | 0.646154 |
ec2b8472fe59a70575e167927788e2acb8dcd2b0 | 2,065 | py | Python | indico/modules/legal/__init__.py | UNOG-Indico/UNOG-Indico-v2 | 4fa4393cc1f3b453a69f5e0ea3b52c18337831a5 | [
"MIT"
] | null | null | null | indico/modules/legal/__init__.py | UNOG-Indico/UNOG-Indico-v2 | 4fa4393cc1f3b453a69f5e0ea3b52c18337831a5 | [
"MIT"
] | null | null | null | indico/modules/legal/__init__.py | UNOG-Indico/UNOG-Indico-v2 | 4fa4393cc1f3b453a69f5e0ea3b52c18337831a5 | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import render_template, session
from indico.core import signals
from indico.core.settings import SettingsProxy
from indico.util.i18n import _
from indico.web.flask.templating import template_hook
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem
# Default disclaimer texts used until an administrator customizes them.
_DEFAULT_RESTRICTED_DISCLAIMER = ("Circulation to people other than the intended audience is not authorized. "
                                  "You are obliged to treat the information with the appropriate level of "
                                  "confidentiality.")
_DEFAULT_PROTECTED_DISCLAIMER = ("As such, this information is intended for an internal audience only. "
                                 "You are obliged to treat the information with the appropriate level of "
                                 "confidentiality.")

# Site-wide storage for legal texts: disclaimers, terms of service and
# privacy policy (each available as a URL or as inline text).
legal_settings = SettingsProxy('legal', {
    'network_protected_disclaimer': _DEFAULT_PROTECTED_DISCLAIMER,
    'restricted_disclaimer': _DEFAULT_RESTRICTED_DISCLAIMER,
    'tos_url': '',
    'tos': '',
    'privacy_policy_url': '',
    'privacy_policy': ''
})
@signals.menu.items.connect_via('admin-sidemenu')
def _sidemenu_items(sender, **kwargs):
    """Expose the Legal/Disclaimers page in the admin side menu (admins only)."""
    if not session.user.is_admin:
        return
    yield SideMenuItem('legal_messages', _('Legal/Disclaimers'), url_for('legal.manage'), section='security')
@template_hook('page-footer', priority=50)
def _inject_tos_footer(**kwargs):
    """Render the ToS footer when a URL or inline ToS text is configured."""
    tos_url = legal_settings.get('tos_url')
    has_inline_tos = bool(legal_settings.get('tos'))
    if tos_url or has_inline_tos:
        return render_template('legal/tos_footer.html', url=tos_url)
@template_hook('page-footer', priority=51)
def _inject_privacy_footer(**kwargs):
    """Render the privacy footer when a URL or inline policy text is configured."""
    policy_url = legal_settings.get('privacy_policy_url')
    has_inline_policy = bool(legal_settings.get('privacy_policy'))
    if policy_url or has_inline_policy:
        return render_template('legal/privacy_footer.html', url=policy_url)
| 36.875 | 113 | 0.710896 |
9bce026e1d316799a2e748f0f57ad7814c32fcde | 50,367 | py | Python | prompt_toolkit/key_binding/bindings/vi.py | gigforks/python-prompt-toolkit | d12cdbb556bef84011792108b1027930b81c4813 | [
"BSD-3-Clause"
] | 1 | 2016-10-01T20:28:31.000Z | 2016-10-01T20:28:31.000Z | prompt_toolkit/key_binding/bindings/vi.py | gigforks/python-prompt-toolkit | d12cdbb556bef84011792108b1027930b81c4813 | [
"BSD-3-Clause"
] | null | null | null | prompt_toolkit/key_binding/bindings/vi.py | gigforks/python-prompt-toolkit | d12cdbb556bef84011792108b1027930b81c4813 | [
"BSD-3-Clause"
] | null | null | null | # pylint: disable=function-redefined
from __future__ import unicode_literals
from prompt_toolkit.buffer import ClipboardData, indent, unindent
from prompt_toolkit.document import Document
from prompt_toolkit.enums import IncrementalSearchDirection, SEARCH_BUFFER, SYSTEM_BUFFER
from prompt_toolkit.filters import Filter, Condition, HasArg, Always, to_cli_filter, IsReadOnly
from prompt_toolkit.key_binding.vi_state import CharacterFind, InputMode
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout.utils import find_window_for_buffer_name
from prompt_toolkit.selection import SelectionType
from .utils import create_handle_decorator
from .scroll import scroll_forward, scroll_backward, scroll_half_page_up, scroll_half_page_down, scroll_one_line_up, scroll_one_line_down, scroll_page_up, scroll_page_down
import prompt_toolkit.filters as filters
import codecs
__all__ = (
'load_vi_bindings',
'load_vi_search_bindings',
'load_vi_system_bindings',
'load_extra_vi_page_navigation_bindings',
)
class ViStateFilter(Filter):
    """
    Filter that is active only while Vi is in a particular input mode.

    :param get_vi_state: Callable that takes a `CommandLineInterface` and
        returns a :class:`~prompt_toolkit.key_binding.vi_state.ViState` instance.
    :param mode: The input mode for which this filter should be active.
    """
    # `get_vi_state` is a callable (rather than a ViState instance) so that
    # the registry of key bindings stays stateless and can be reused for
    # multiple CommandLineInterface instances.
    def __init__(self, get_vi_state, mode):
        assert callable(get_vi_state)

        self.get_vi_state = get_vi_state
        self.mode = mode

    def __call__(self, cli):
        vi_state = self.get_vi_state(cli)
        return vi_state.input_mode == self.mode
class CursorRegion(object):
    """
    Return value for functions wrapped in ``change_delete_move_yank_handler``.

    Both `start` and `end` are offsets relative to the current cursor
    position.
    """
    def __init__(self, start, end=0):
        self.start = start
        self.end = end

    def sorted(self):
        """
        Return a ``(start, end)`` tuple ordered so that ``start <= end``.
        """
        return min(self.start, self.end), max(self.start, self.end)
def load_vi_bindings(registry, get_vi_state, enable_visual_key=Always(), get_search_state=None, filter=None):
"""
Vi extensions.
# Overview of Readline Vi commands:
# http://www.catonmat.net/download/bash-vi-editing-mode-cheat-sheet.pdf
:param get_vi_state: Callable that takes a CommandLineInterface instances and returns the used ViState.
:param enable_visual_key: Filter to enable lowercase 'v' bindings. A reason to disable these
are to support open-in-editor functionality. These key bindings conflict.
:param get_search_state: None or a callable that takes a CommandLineInterface and returns a SearchState.
"""
# Note: Some key bindings have the "~IsReadOnly()" filter added. This
# prevents the handler to be executed when the focus is on a
# read-only buffer.
# This is however only required for those that change the ViState to
# INSERT mode. The `Buffer` class itself throws the
# `EditReadOnlyBuffer` exception for any text operations which is
# handled correctly. There is no need to add "~IsReadOnly" to all key
# bindings that do text manipulation.
assert callable(get_vi_state)
enable_visual_key = to_cli_filter(enable_visual_key)
# Default get_search_state.
if get_search_state is None:
def get_search_state(cli): return cli.search_state
handle = create_handle_decorator(registry, filter)
insert_mode = ViStateFilter(get_vi_state, InputMode.INSERT) & ~ filters.HasSelection()
navigation_mode = ViStateFilter(get_vi_state, InputMode.NAVIGATION) & ~ filters.HasSelection()
replace_mode = ViStateFilter(get_vi_state, InputMode.REPLACE) & ~ filters.HasSelection()
selection_mode = filters.HasSelection()
vi_transform_functions = [
# Rot 13 transformation
(('g', '?'), lambda string: codecs.encode(string, 'rot_13')),
# To lowercase
(('g', 'u'), lambda string: string.lower()),
# To uppercase.
(('g', 'U'), lambda string: string.upper()),
# Swap case.
# (XXX: If we would implement 'tildeop', the 'g' prefix is not required.)
(('g', '~'), lambda string: string.swapcase()),
]
def check_cursor_position(event):
"""
After every command, make sure that if we are in navigation mode, we
never put the cursor after the last character of a line. (Unless it's
an empty line.)
"""
buffer = event.current_buffer
if (
(filter is None or filter(event.cli)) and # First make sure that this key bindings are active.
get_vi_state(event.cli).input_mode == InputMode.NAVIGATION and
buffer.document.is_cursor_at_the_end_of_line and
len(buffer.document.current_line) > 0):
buffer.cursor_position -= 1
registry.on_handler_called += check_cursor_position
@handle(Keys.Escape)
def _(event):
"""
Escape goes to vi navigation mode.
"""
buffer = event.current_buffer
vi_state = get_vi_state(event.cli)
if vi_state.input_mode in (InputMode.INSERT, InputMode.REPLACE):
buffer.cursor_position += buffer.document.get_cursor_left_position()
vi_state.input_mode = InputMode.NAVIGATION
if bool(buffer.selection_state):
buffer.exit_selection()
@handle('k', filter=selection_mode)
def _(event):
"""
Arrow up in selection mode.
"""
event.current_buffer.cursor_up(count=event.arg)
@handle('j', filter=selection_mode)
def _(event):
"""
Arrow down in selection mode.
"""
event.current_buffer.cursor_down(count=event.arg)
@handle('k', filter=navigation_mode)
@handle(Keys.Up, filter=navigation_mode)
@handle(Keys.ControlP, filter=navigation_mode)
def _(event):
"""
Arrow up and ControlP in navigation mode go up.
"""
b = event.current_buffer
b.auto_up(count=event.arg)
@handle('j', filter=navigation_mode)
@handle(Keys.Down, filter=navigation_mode)
@handle(Keys.ControlN, filter=navigation_mode)
def _(event):
"""
Arrow down and Control-N in navigation mode.
"""
b = event.current_buffer
b.auto_down(count=event.arg)
@handle(Keys.Backspace, filter=navigation_mode)
def _(event):
"""
In navigation-mode, move cursor.
"""
event.current_buffer.cursor_position += \
event.current_buffer.document.get_cursor_left_position(count=event.arg)
@handle(Keys.ControlV, Keys.Any, filter=insert_mode)
def _(event):
"""
Insert a character literally (quoted insert).
"""
event.current_buffer.insert_text(event.data, overwrite=False)
@handle(Keys.ControlN, filter=insert_mode)
def _(event):
b = event.current_buffer
if b.complete_state:
b.complete_next()
else:
event.cli.start_completion(select_first=True)
@handle(Keys.ControlP, filter=insert_mode)
def _(event):
"""
Control-P: To previous completion.
"""
b = event.current_buffer
if b.complete_state:
b.complete_previous()
else:
event.cli.start_completion(select_last=True)
@handle(Keys.ControlY, filter=insert_mode)
def _(event):
"""
Accept current completion.
"""
event.current_buffer.complete_state = None
@handle(Keys.ControlE, filter=insert_mode)
def _(event):
"""
Cancel completion. Go back to originally typed text.
"""
event.current_buffer.cancel_completion()
@handle(Keys.ControlJ, filter=navigation_mode)
def _(event):
"""
In navigation mode, pressing enter will always return the input.
"""
b = event.current_buffer
if b.accept_action.is_returnable:
b.accept_action.validate_and_handle(event.cli, b)
# ** In navigation mode **
# List of navigation commands: http://hea-www.harvard.edu/~fine/Tech/vi.html
@handle(Keys.Insert, filter=navigation_mode)
def _(event):
" Presing the Insert key. "
get_vi_state(event.cli).input_mode = InputMode.INSERT
@handle('a', filter=navigation_mode & ~IsReadOnly())
# ~IsReadOnly, because we want to stay in navigation mode for
# read-only buffers.
def _(event):
event.current_buffer.cursor_position += event.current_buffer.document.get_cursor_right_position()
get_vi_state(event.cli).input_mode = InputMode.INSERT
@handle('A', filter=navigation_mode & ~IsReadOnly())
def _(event):
event.current_buffer.cursor_position += event.current_buffer.document.get_end_of_line_position()
get_vi_state(event.cli).input_mode = InputMode.INSERT
@handle('C', filter=navigation_mode & ~IsReadOnly())
def _(event):
"""
# Change to end of line.
# Same as 'c$' (which is implemented elsewhere.)
"""
buffer = event.current_buffer
deleted = buffer.delete(count=buffer.document.get_end_of_line_position())
event.cli.clipboard.set_text(deleted)
get_vi_state(event.cli).input_mode = InputMode.INSERT
@handle('c', 'c', filter=navigation_mode & ~IsReadOnly())
@handle('S', filter=navigation_mode & ~IsReadOnly())
def _(event): # TODO: implement 'arg'
"""
Change current line
"""
buffer = event.current_buffer
# We copy the whole line.
data = ClipboardData(buffer.document.current_line, SelectionType.LINES)
event.cli.clipboard.set_data(data)
# But we delete after the whitespace
buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True)
buffer.delete(count=buffer.document.get_end_of_line_position())
get_vi_state(event.cli).input_mode = InputMode.INSERT
@handle('D', filter=navigation_mode)
def _(event):
buffer = event.current_buffer
deleted = buffer.delete(count=buffer.document.get_end_of_line_position())
event.cli.clipboard.set_text(deleted)
@handle('d', 'd', filter=navigation_mode)
def _(event):
"""
Delete line. (Or the following 'n' lines.)
"""
buffer = event.current_buffer
# Split string in before/deleted/after text.
lines = buffer.document.lines
before = '\n'.join(lines[:buffer.document.cursor_position_row])
deleted = '\n'.join(lines[buffer.document.cursor_position_row: buffer.document.cursor_position_row + event.arg])
after = '\n'.join(lines[buffer.document.cursor_position_row + event.arg:])
# Set new text.
if before and after:
before = before + '\n'
# Set text and cursor position.
buffer.document = Document(
text=before + after,
# Cursor At the start of the first 'after' line, after the leading whitespace.
cursor_position = len(before) + len(after) - len(after.lstrip(' ')))
# Set clipboard data
event.cli.clipboard.set_data(ClipboardData(deleted, SelectionType.LINES))
@handle('i', filter=navigation_mode & ~IsReadOnly())
def _(event):
get_vi_state(event.cli).input_mode = InputMode.INSERT
@handle('I', filter=navigation_mode & ~IsReadOnly())
def _(event):
get_vi_state(event.cli).input_mode = InputMode.INSERT
event.current_buffer.cursor_position += event.current_buffer.document.get_start_of_line_position(after_whitespace=True)
@handle('J', filter=navigation_mode)
def _(event):
""" Join lines. """
for i in range(event.arg):
event.current_buffer.join_next_line()
@handle('J', filter=selection_mode)
def _(event):
""" Join selected lines. """
event.current_buffer.join_selected_lines()
@handle('n', filter=navigation_mode)
def _(event): # XXX: use `change_delete_move_yank_handler`
"""
Search next.
"""
event.current_buffer.apply_search(
get_search_state(event.cli), include_current_position=False,
count=event.arg)
@handle('N', filter=navigation_mode)
def _(event): # TODO: use `change_delete_move_yank_handler`
"""
Search previous.
"""
event.current_buffer.apply_search(
~get_search_state(event.cli), include_current_position=False,
count=event.arg)
@handle('p', filter=navigation_mode)
def _(event):
"""
Paste after
"""
event.current_buffer.paste_clipboard_data(
event.cli.clipboard.get_data(),
count=event.arg)
@handle('P', filter=navigation_mode)
def _(event):
"""
Paste before
"""
event.current_buffer.paste_clipboard_data(
event.cli.clipboard.get_data(),
before=True,
count=event.arg)
@handle('r', Keys.Any, filter=navigation_mode)
def _(event):
"""
Replace single character under cursor
"""
event.current_buffer.insert_text(event.data * event.arg, overwrite=True)
event.current_buffer.cursor_position -= 1
@handle('R', filter=navigation_mode)
def _(event):
"""
Go to 'replace'-mode.
"""
get_vi_state(event.cli).input_mode = InputMode.REPLACE
@handle('s', filter=navigation_mode & ~IsReadOnly())
def _(event):
"""
Substitute with new text
(Delete character(s) and go to insert mode.)
"""
text = event.current_buffer.delete(count=event.arg)
event.cli.clipboard.set_text(text)
get_vi_state(event.cli).input_mode = InputMode.INSERT
@handle('u', filter=navigation_mode, save_before=(lambda e: False))
def _(event):
for i in range(event.arg):
event.current_buffer.undo()
@handle('V', filter=navigation_mode)
def _(event):
"""
Start lines selection.
"""
event.current_buffer.start_selection(selection_type=SelectionType.LINES)
@handle(Keys.ControlV, filter=navigation_mode)
def _(event):
" Enter block selection mode. "
event.current_buffer.start_selection(selection_type=SelectionType.BLOCK)
@handle('V', filter=selection_mode)
def _(event):
"""
Exit line selection mode, or go from non line selection mode to line
selection mode.
"""
selection_state = event.current_buffer.selection_state
if selection_state.type != SelectionType.LINES:
selection_state.type = SelectionType.LINES
else:
event.current_buffer.exit_selection()
@handle('v', filter=navigation_mode & enable_visual_key)
def _(event):
" Enter character selection mode. "
event.current_buffer.start_selection(selection_type=SelectionType.CHARACTERS)
@handle('v', filter=selection_mode)
def _(event):
"""
Exit character selection mode, or go from non-character-selection mode
to character selection mode.
"""
selection_state = event.current_buffer.selection_state
if selection_state.type != SelectionType.CHARACTERS:
selection_state.type = SelectionType.CHARACTERS
else:
event.current_buffer.exit_selection()
@handle(Keys.ControlV, filter=selection_mode)
def _(event):
"""
Exit block selection mode, or go from non block selection mode to block
selection mode.
"""
selection_state = event.current_buffer.selection_state
if selection_state.type != SelectionType.BLOCK:
selection_state.type = SelectionType.BLOCK
else:
event.current_buffer.exit_selection()
@handle('a', 'w', filter=selection_mode)
@handle('a', 'W', filter=selection_mode)
def _(event):
"""
Switch from visual linewise mode to visual characterwise mode.
"""
buffer = event.current_buffer
if buffer.selection_state and buffer.selection_state.type == SelectionType.LINES:
buffer.selection_state.type = SelectionType.CHARACTERS
@handle('x', filter=navigation_mode)
def _(event):
"""
Delete character.
"""
text = event.current_buffer.delete(count=event.arg)
event.cli.clipboard.set_text(text)
@handle('x', filter=selection_mode)
@handle('d', filter=selection_mode)
def _(event):
"""
Cut selection.
"""
clipboard_data = event.current_buffer.cut_selection()
event.cli.clipboard.set_data(clipboard_data)
@handle('c', filter=selection_mode & ~IsReadOnly())
def _(event):
"""
Change selection (cut and go to insert mode).
"""
clipboard_data = event.current_buffer.cut_selection()
event.cli.clipboard.set_data(clipboard_data)
get_vi_state(event.cli).input_mode = InputMode.INSERT
    @handle('y', filter=selection_mode)
    def _(event):
        """
        Copy selection.
        """
        clipboard_data = event.current_buffer.copy_selection()
        event.cli.clipboard.set_data(clipboard_data)
    @handle('X', filter=navigation_mode)
    def _(event):
        """
        Delete the character before the cursor and put it on the clipboard.
        """
        text = event.current_buffer.delete_before_cursor()
        event.cli.clipboard.set_text(text)
    @handle('y', 'y', filter=navigation_mode)
    @handle('Y', filter=navigation_mode)
    def _(event):
        """
        Yank the whole line.
        """
        text = '\n'.join(event.current_buffer.document.lines_from_current[:event.arg])
        event.cli.clipboard.set_data(ClipboardData(text, SelectionType.LINES))
    @handle('+', filter=navigation_mode)
    def _(event):
        """
        Move to first non whitespace of next line
        """
        buffer = event.current_buffer
        buffer.cursor_position += buffer.document.get_cursor_down_position(count=event.arg)
        buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True)
    @handle('-', filter=navigation_mode)
    def _(event):
        """
        Move to first non whitespace of previous line
        """
        buffer = event.current_buffer
        buffer.cursor_position += buffer.document.get_cursor_up_position(count=event.arg)
        buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True)
    @handle('>', '>', filter=navigation_mode)
    def _(event):
        """
        Indent lines.
        """
        buffer = event.current_buffer
        current_row = buffer.document.cursor_position_row
        indent(buffer, current_row, current_row + event.arg)
    @handle('<', '<', filter=navigation_mode)
    def _(event):
        """
        Unindent lines.
        """
        current_row = event.current_buffer.document.cursor_position_row
        unindent(event.current_buffer, current_row, current_row + event.arg)
    @handle('>', filter=selection_mode)
    def _(event):
        """
        Indent selection
        """
        buffer = event.current_buffer
        selection_type = buffer.selection_state.type
        # Indenting only makes sense for line-wise (visual line) selections.
        if selection_type == SelectionType.LINES:
            from_, to = buffer.document.selection_range()
            from_, _ = buffer.document.translate_index_to_position(from_)
            to, _ = buffer.document.translate_index_to_position(to)
            indent(buffer, from_, to + 1, count=event.arg)
    @handle('<', filter=selection_mode)
    def _(event):
        """
        Unindent selection
        """
        buffer = event.current_buffer
        selection_type = buffer.selection_state.type
        if selection_type == SelectionType.LINES:
            from_, to = buffer.document.selection_range()
            from_, _ = buffer.document.translate_index_to_position(from_)
            to, _ = buffer.document.translate_index_to_position(to)
            unindent(buffer, from_, to + 1, count=event.arg)
    @handle('O', filter=navigation_mode & ~IsReadOnly())
    def _(event):
        """
        Open line above and enter insertion mode
        """
        event.current_buffer.insert_line_above(
            copy_margin=not event.cli.in_paste_mode)
        get_vi_state(event.cli).input_mode = InputMode.INSERT
    @handle('o', filter=navigation_mode & ~IsReadOnly())
    def _(event):
        """
        Open line below and enter insertion mode
        """
        event.current_buffer.insert_line_below(
            copy_margin=not event.cli.in_paste_mode)
        get_vi_state(event.cli).input_mode = InputMode.INSERT
    @handle('~', filter=navigation_mode)
    def _(event):
        """
        Reverse case of current character and move cursor forward.
        """
        buffer = event.current_buffer
        c = buffer.document.current_char
        if c is not None and c != '\n':
            c = (c.upper() if c.islower() else c.lower())
            # Overwriting also advances the cursor one position.
            buffer.insert_text(c, overwrite=True)
    @handle('#', filter=navigation_mode)
    def _(event):
        """
        Go to previous occurrence of this word.
        """
        b = event.cli.current_buffer
        search_state = get_search_state(event.cli)
        # Search backwards for the word currently under the cursor.
        search_state.text = b.document.get_word_under_cursor()
        search_state.direction = IncrementalSearchDirection.BACKWARD
        b.apply_search(search_state, count=event.arg,
                       include_current_position=False)
    @handle('*', filter=navigation_mode)
    def _(event):
        """
        Go to next occurrence of this word.
        """
        b = event.cli.current_buffer
        search_state = get_search_state(event.cli)
        search_state.text = b.document.get_word_under_cursor()
        search_state.direction = IncrementalSearchDirection.FORWARD
        b.apply_search(search_state, count=event.arg,
                       include_current_position=False)
    @handle('(', filter=navigation_mode)
    def _(event):
        # TODO: go to begin of sentence.
        pass
    @handle(')', filter=navigation_mode)
    def _(event):
        # TODO: go to end of sentence.
        pass
def change_delete_move_yank_handler(*keys, **kw):
"""
Register a change/delete/move/yank handlers. e.g. 'dw'/'cw'/'w'/'yw'
The decorated function should return a ``CursorRegion``.
This decorator will create both the 'change', 'delete' and move variants,
based on that ``CursorRegion``.
When there is nothing selected yet, this will also handle the "visual"
binding. E.g. 'viw' should select the current word.
"""
no_move_handler = kw.pop('no_move_handler', False)
# TODO: Also do '>' and '<' indent/unindent operators.
# TODO: Also "gq": text formatting
# See: :help motion.txt
def decorator(func):
if not no_move_handler:
@handle(*keys, filter=navigation_mode|selection_mode)
def move(event):
""" Create move handler. """
region = func(event)
event.current_buffer.cursor_position += region.start
def create_transform_handler(transform_func, *a):
@handle(*(a + keys), filter=navigation_mode)
def _(event):
""" Apply transformation (uppercase, lowercase, rot13, swap case). """
region = func(event)
start, end = region.sorted()
buffer = event.current_buffer
# Transform.
buffer.transform_region(
buffer.cursor_position + start,
buffer.cursor_position + end,
transform_func)
# Move cursor
buffer.cursor_position += (region.end or region.start)
for k, f in vi_transform_functions:
create_transform_handler(f, *k)
@handle('y', *keys, filter=navigation_mode)
def yank_handler(event):
""" Create yank handler. """
region = func(event)
buffer = event.current_buffer
start, end = region.sorted()
substring = buffer.text[buffer.cursor_position + start: buffer.cursor_position + end]
if substring:
event.cli.clipboard.set_text(substring)
def create(delete_only):
""" Create delete and change handlers. """
@handle('cd'[delete_only], *keys, filter=navigation_mode & ~IsReadOnly())
@handle('cd'[delete_only], *keys, filter=navigation_mode & ~IsReadOnly())
def _(event):
region = func(event)
deleted = ''
buffer = event.current_buffer
if region:
start, end = region.sorted()
# Move to the start of the region.
buffer.cursor_position += start
# Delete until end of region.
deleted = buffer.delete(count=end-start)
# Set deleted/changed text to clipboard.
if deleted:
event.cli.clipboard.set_text(deleted)
# Only go back to insert mode in case of 'change'.
if not delete_only:
get_vi_state(event.cli).input_mode = InputMode.INSERT
create(True)
create(False)
return func
return decorator
    @change_delete_move_yank_handler('b')
    def _(event):
        """ Move one word or token left. """
        return CursorRegion(event.current_buffer.document.find_start_of_previous_word(count=event.arg) or 0)
    @change_delete_move_yank_handler('B')
    def _(event):
        """ Move one non-blank word left """
        return CursorRegion(event.current_buffer.document.find_start_of_previous_word(count=event.arg, WORD=True) or 0)
    @change_delete_move_yank_handler('$')
    def key_dollar(event):
        """ 'c$', 'd$' and '$': Delete/change/move until end of line. """
        return CursorRegion(event.current_buffer.document.get_end_of_line_position())
    @change_delete_move_yank_handler('w')
    def _(event):
        """ 'word' forward. 'cw', 'dw', 'w': Delete/change/move one word.  """
        # Fall back to end-of-document when there is no next word.
        return CursorRegion(event.current_buffer.document.find_next_word_beginning(count=event.arg) or
                            event.current_buffer.document.get_end_of_document_position())
    @change_delete_move_yank_handler('W')
    def _(event):
        """ 'WORD' forward. 'cW', 'dW', 'W': Delete/change/move one WORD. """
        return CursorRegion(event.current_buffer.document.find_next_word_beginning(count=event.arg, WORD=True) or
                            event.current_buffer.document.get_end_of_document_position())
    @change_delete_move_yank_handler('e')
    def _(event):
        """ End of 'word': 'ce', 'de', 'e' """
        end = event.current_buffer.document.find_next_word_ending(count=event.arg)
        return CursorRegion(end - 1 if end else 0)
    @change_delete_move_yank_handler('E')
    def _(event):
        """ End of 'WORD': 'cE', 'dE', 'E' """
        end = event.current_buffer.document.find_next_word_ending(count=event.arg, WORD=True)
        return CursorRegion(end - 1 if end else 0)
    @change_delete_move_yank_handler('i', 'w', no_move_handler=True)
    def _(event):
        """ Inner 'word': ciw and diw """
        start, end = event.current_buffer.document.find_boundaries_of_current_word()
        return CursorRegion(start, end)
    @change_delete_move_yank_handler('a', 'w', no_move_handler=True)
    def _(event):
        """ A 'word': caw and daw """
        start, end = event.current_buffer.document.find_boundaries_of_current_word(include_trailing_whitespace=True)
        return CursorRegion(start, end)
    @change_delete_move_yank_handler('i', 'W', no_move_handler=True)
    def _(event):
        """ Inner 'WORD': ciW and diW """
        start, end = event.current_buffer.document.find_boundaries_of_current_word(WORD=True)
        return CursorRegion(start, end)
    @change_delete_move_yank_handler('a', 'W', no_move_handler=True)
    def _(event):
        """ A 'WORD': caW and daW """
        start, end = event.current_buffer.document.find_boundaries_of_current_word(WORD=True, include_trailing_whitespace=True)
        return CursorRegion(start, end)
    @change_delete_move_yank_handler('^')
    def key_circumflex(event):
        """ 'c^', 'd^' and '^': Soft start of line, after whitespace. """
        return CursorRegion(event.current_buffer.document.get_start_of_line_position(after_whitespace=True))
    @change_delete_move_yank_handler('0', no_move_handler=True)
    def key_zero(event):
        """
        'c0', 'd0': Hard start of line, before whitespace.
        (The move '0' key is implemented elsewhere, because a '0' could also change the `arg`.)
        """
        return CursorRegion(event.current_buffer.document.get_start_of_line_position(after_whitespace=False))
    def create_ci_ca_handles(ci_start, ci_end, inner):
        # TODO: 'dab', 'dib', (brackets or block) 'daB', 'diB', Braces.
        # TODO: 'dat', 'dit', (tags (like xml)
        """
        Delete/Change string between this start and stop character. But keep these characters.
        This implements all the ci", ci<, ci{, ci(, di", di<, ca", ca<, ... combinations.
        """
        # Register for both the opening and the closing character, so that
        # e.g. both 'ci(' and 'ci)' operate on the same region.
        @change_delete_move_yank_handler('ai'[inner], ci_start, no_move_handler=True)
        @change_delete_move_yank_handler('ai'[inner], ci_end, no_move_handler=True)
        def _(event):
            # Search backwards for the opening delimiter and forwards for the
            # closing one, not restricted to the current line.
            start = event.current_buffer.document.find_backwards(ci_start, in_current_line=False)
            end = event.current_buffer.document.find(ci_end, in_current_line=False)
            if start is not None and end is not None:
                # 'inner' (the 'i' variant) excludes the delimiters themselves.
                offset = 0 if inner else 1
                return CursorRegion(start + 1 - offset, end + offset)
            else:
                # Nothing found.
                return CursorRegion(0)
    # Register handlers for every delimiter pair, in both the 'i' (inner)
    # and 'a' (around) variants.
    for inner in (False, True):
        for ci_start, ci_end in [('"', '"'), ("'", "'"), ("`", "`"),
                                 ('[', ']'), ('<', '>'), ('{', '}'), ('(', ')')]:
            create_ci_ca_handles(ci_start, ci_end, inner)
    @change_delete_move_yank_handler('{')
    def _(event):
        """
        Move to previous blank-line separated section.
        Implements '{', 'c{', 'd{', 'y{'
        """
        def match_func(text):
            # A section boundary is an empty or whitespace-only line.
            return not text or text.isspace()
        line_index = event.current_buffer.document.find_previous_matching_line(
            match_func=match_func, count=event.arg)
        if line_index:
            index = event.current_buffer.document.get_cursor_up_position(count=-line_index)
        else:
            index = 0
        return CursorRegion(index)
    @change_delete_move_yank_handler('}')
    def _(event):
        """
        Move to next blank-line separated section.
        Implements '}', 'c}', 'd}', 'y}'
        """
        def match_func(text):
            return not text or text.isspace()
        line_index = event.current_buffer.document.find_next_matching_line(
            match_func=match_func, count=event.arg)
        if line_index:
            index = event.current_buffer.document.get_cursor_down_position(count=line_index)
        else:
            index = 0
        return CursorRegion(index)
    @change_delete_move_yank_handler('f', Keys.Any)
    def _(event):
        """
        Go to next occurrence of character. Typing 'fx' will move the
        cursor to the next occurrence of character 'x'.
        """
        # Remember this find, so ';' and ',' can repeat it.
        get_vi_state(event.cli).last_character_find = CharacterFind(event.data, False)
        match = event.current_buffer.document.find(event.data, in_current_line=True, count=event.arg)
        return CursorRegion(match or 0)
    @change_delete_move_yank_handler('F', Keys.Any)
    def _(event):
        """
        Go to previous occurrence of character. Typing 'Fx' will move the
        cursor to the previous occurrence of character 'x'.
        """
        get_vi_state(event.cli).last_character_find = CharacterFind(event.data, True)
        return CursorRegion(event.current_buffer.document.find_backwards(event.data, in_current_line=True, count=event.arg) or 0)
    @change_delete_move_yank_handler('t', Keys.Any)
    def _(event):
        """
        Move right to the next occurrence of c, then one char backward.
        """
        get_vi_state(event.cli).last_character_find = CharacterFind(event.data, False)
        match = event.current_buffer.document.find(event.data, in_current_line=True, count=event.arg)
        return CursorRegion(match - 1 if match else 0)
    @change_delete_move_yank_handler('T', Keys.Any)
    def _(event):
        """
        Move left to the previous occurrence of c, then one char forward.
        """
        get_vi_state(event.cli).last_character_find = CharacterFind(event.data, True)
        match = event.current_buffer.document.find_backwards(event.data, in_current_line=True, count=event.arg)
        return CursorRegion(match + 1 if match else 0)
    def repeat(reverse):
        """
        Create ',' and ';' commands.
        """
        @change_delete_move_yank_handler(',' if reverse else ';')
        def _(event):
            # Repeat the last 'f'/'F'/'t'/'T' command.
            pos = 0
            vi_state = get_vi_state(event.cli)
            if vi_state.last_character_find:
                char = vi_state.last_character_find.character
                backwards = vi_state.last_character_find.backwards
                # ',' repeats in the opposite direction of the original find.
                if reverse:
                    backwards = not backwards
                if backwards:
                    pos = event.current_buffer.document.find_backwards(char, in_current_line=True, count=event.arg)
                else:
                    pos = event.current_buffer.document.find(char, in_current_line=True, count=event.arg)
            return CursorRegion(pos or 0)
    repeat(True)
    repeat(False)
    @change_delete_move_yank_handler('h')
    @change_delete_move_yank_handler(Keys.Left)
    def _(event):
        """ Implements 'ch', 'dh', 'h': Cursor left. """
        return CursorRegion(event.current_buffer.document.get_cursor_left_position(count=event.arg))
    @change_delete_move_yank_handler('j', no_move_handler=True)
    def _(event):
        """ Implements 'cj', 'dj', 'j', ... Cursor down. """
        return CursorRegion(event.current_buffer.document.get_cursor_down_position(count=event.arg))
    @change_delete_move_yank_handler('k', no_move_handler=True)
    def _(event):
        """ Implements 'ck', 'dk', 'k', ... Cursor up. """
        return CursorRegion(event.current_buffer.document.get_cursor_up_position(count=event.arg))
    @change_delete_move_yank_handler('l')
    @change_delete_move_yank_handler(' ')
    @change_delete_move_yank_handler(Keys.Right)
    def _(event):
        """ Implements 'cl', 'dl', 'l', 'c ', 'd ', ' '. Cursor right. """
        return CursorRegion(event.current_buffer.document.get_cursor_right_position(count=event.arg))
    @change_delete_move_yank_handler('H')
    def _(event):
        """
        Moves to the start of the visible region. (Below the scroll offset.)
        Implements 'cH', 'dH', 'H'.
        """
        w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
        b = event.current_buffer
        if w:
            # When we find a Window that has BufferControl showing this window,
            # move to the start of the visible area.
            pos = (b.document.translate_row_col_to_index(
                w.render_info.first_visible_line(after_scroll_offset=True), 0) -
                b.cursor_position)
        else:
            # Otherwise, move to the start of the input.
            pos = -len(b.document.text_before_cursor)
        return CursorRegion(pos)
    @change_delete_move_yank_handler('M')
    def _(event):
        """
        Moves cursor to the vertical center of the visible region.
        Implements 'cM', 'dM', 'M'.
        """
        w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
        b = event.current_buffer
        if w:
            # When we find a Window that has BufferControl showing this window,
            # move to the center of the visible area.
            pos = (b.document.translate_row_col_to_index(
                w.render_info.center_visible_line(), 0) -
                b.cursor_position)
        else:
            # Otherwise, move to the start of the input.
            pos = -len(b.document.text_before_cursor)
        return CursorRegion(pos)
    @change_delete_move_yank_handler('L')
    def _(event):
        """
        Moves to the end of the visible region. (Above the scroll offset.)
        """
        w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
        b = event.current_buffer
        if w:
            # When we find a Window that has BufferControl showing this window,
            # move to the end of the visible area.
            pos = (b.document.translate_row_col_to_index(
                w.render_info.last_visible_line(before_scroll_offset=True), 0) -
                b.cursor_position)
        else:
            # Otherwise, move to the end of the input.
            pos = len(b.document.text_after_cursor)
        return CursorRegion(pos)
    @handle('z', '+', filter=navigation_mode|selection_mode)
    @handle('z', 't', filter=navigation_mode|selection_mode)
    @handle('z', Keys.ControlJ, filter=navigation_mode|selection_mode)
    def _(event):
        """
        Scrolls the window to makes the current line the first line in the visible region.
        """
        w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
        b = event.cli.current_buffer
        if w and w.render_info:
            # Scroll so that the screen line containing the cursor becomes
            # the first visible line.
            cursor_position_row = b.document.cursor_position_row
            render_row = w.render_info.input_line_to_screen_line.get(cursor_position_row)
            if render_row is not None:
                w.vertical_scroll = max(0, render_row)
    @handle('z', '-', filter=navigation_mode|selection_mode)
    @handle('z', 'b', filter=navigation_mode|selection_mode)
    def _(event):
        """
        Scrolls the window to makes the current line the last line in the visible region.
        """
        w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
        b = event.cli.current_buffer
        if w and w.render_info:
            # Scroll so that the screen line containing the cursor ends up at
            # the bottom of the visible area.
            cursor_position_row = b.document.cursor_position_row
            render_row = w.render_info.input_line_to_screen_line.get(cursor_position_row)
            if render_row is not None:
                w.vertical_scroll = max(0, (render_row - w.render_info.window_height))
    @handle('z', 'z', filter=navigation_mode|selection_mode)
    def _(event):
        """
        Center Window vertically around cursor.
        """
        w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
        b = event.cli.current_buffer
        if w and w.render_info:
            # Calculate the offset that we need in order to position the row
            # containing the cursor in the center.
            cursor_position_row = b.document.cursor_position_row
            render_row = w.render_info.input_line_to_screen_line.get(cursor_position_row)
            if render_row is not None:
                w.vertical_scroll = max(0, int(render_row - w.render_info.window_height / 2))
    @change_delete_move_yank_handler('%')
    def _(event):
        """
        Implements 'c%', 'd%', '%, 'y%' (Move to corresponding bracket.)
        If an 'arg' has been given, go to this % position in the file.
        """
        buffer = event.current_buffer
        if event._arg:
            # If 'arg' has been given, the meaning of % is to go to the 'x%'
            # row in the file.
            if 0 < event.arg <= 100:
                absolute_index = buffer.document.translate_row_col_to_index(
                    int(event.arg * buffer.document.line_count / 100), 0)
                return CursorRegion(absolute_index - buffer.document.cursor_position)
            else:
                return CursorRegion(0)  # Do nothing.
        else:
            # Move to the corresponding opening/closing bracket (()'s, []'s and {}'s).
            return CursorRegion(buffer.document.matching_bracket_position)
    @change_delete_move_yank_handler('|')
    def _(event):
        # Move to the n-th column (you may specify the argument n by typing
        # it on number keys, for example, 20|).
        return CursorRegion(event.current_buffer.document.get_column_cursor_position(event.arg))
    @change_delete_move_yank_handler('g', 'g')
    def _(event):
        """
        Implements 'gg', 'cgg', 'ygg'
        """
        d = event.current_buffer.document
        if event._arg:
            # Move to the given line.
            return CursorRegion(d.translate_row_col_to_index(event.arg - 1, 0) - d.cursor_position)
        else:
            # Move to the top of the input.
            return CursorRegion(d.get_start_of_document_position())
    @change_delete_move_yank_handler('g', '_')
    def _(event):
        """
        Go to last non-blank of line.
        'g_', 'cg_', 'yg_', etc..
        """
        return CursorRegion(
            event.current_buffer.document.last_non_blank_of_current_line_position())
    @change_delete_move_yank_handler('g', 'e')
    def _(event):
        """
        Go to last character of previous word.
        'ge', 'cge', 'yge', etc..
        """
        return CursorRegion(
            event.current_buffer.document.find_start_of_previous_word(count=event.arg) or 0)
    @change_delete_move_yank_handler('g', 'E')
    def _(event):
        """
        Go to last character of previous WORD.
        'gE', 'cgE', 'ygE', etc..
        """
        return CursorRegion(
            event.current_buffer.document.find_start_of_previous_word(
                count=event.arg, WORD=True) or 0)
    @change_delete_move_yank_handler('G')
    def _(event):
        """
        Go to the end of the document. (If no arg has been given.)
        """
        return CursorRegion(len(event.current_buffer.document.text_after_cursor))
    @handle('G', filter=HasArg())
    def _(event):
        """
        If an argument is given, move to this line in the history. (for
        example, 15G)
        """
        event.current_buffer.go_to_history(event.arg - 1)
    @handle(Keys.Any, filter=navigation_mode)
    @handle(Keys.Any, filter=selection_mode)
    def _(event):
        """
        Always handle numeric keys in navigation mode as arg.
        """
        if event.data in '123456789' or (event._arg and event.data == '0'):
            event.append_to_arg_count(event.data)
        elif event.data == '0':
            # A leading '0' (when no arg has been typed yet) means: move to
            # the hard start of the line.
            buffer = event.current_buffer
            buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=False)
    @handle(Keys.Any, filter=replace_mode)
    def _(event):
        """
        Insert data at cursor position.
        """
        event.current_buffer.insert_text(event.data, overwrite=True)
    def create_selection_transform_handler(keys, transform_func):
        """
        Apply transformation on selection (uppercase, lowercase, rot13, swap case).
        """
        @handle(*keys, filter=selection_mode)
        def _(event):
            range = event.current_buffer.document.selection_range()
            if range:
                event.current_buffer.transform_region(range[0], range[1], transform_func)
    for k, f in vi_transform_functions:
        create_selection_transform_handler(k, f)
    @handle(Keys.ControlX, Keys.ControlL, filter=insert_mode)
    def _(event):
        """
        Pressing the ControlX - ControlL sequence in Vi mode does line
        completion based on the other lines in the document and the history.
        """
        event.current_buffer.start_history_lines_completion()
    @handle(Keys.ControlX, Keys.ControlF, filter=insert_mode)
    def _(event):
        """
        Complete file names.
        """
        # TODO
        pass
def load_vi_open_in_editor_bindings(registry, get_vi_state, filter=None):
    """
    Vi binding for editing the buffer in an external editor.

    Pressing 'v' in navigation mode hands the current buffer over to the
    external editor configured for the application.
    """
    assert callable(get_vi_state)

    handle = create_handle_decorator(registry, filter)
    in_navigation_mode = (
        ViStateFilter(get_vi_state, InputMode.NAVIGATION) &
        ~filters.HasSelection())

    @handle('v', filter=in_navigation_mode)
    def open_in_editor(event):
        " Launch the external editor on the current buffer. "
        event.current_buffer.open_in_editor(event.cli)
def load_vi_system_bindings(registry, get_vi_state, filter=None):
    """
    Vi bindings for the system prompt: '!' opens it, Escape/Control-C
    cancel it, and Control-J (Enter) runs the typed shell command.
    """
    assert callable(get_vi_state)
    has_focus = filters.HasFocus(SYSTEM_BUFFER)
    navigation_mode = ViStateFilter(get_vi_state, InputMode.NAVIGATION) & ~ filters.HasSelection()
    handle = create_handle_decorator(registry, filter)
    @handle('!', filter=~has_focus & navigation_mode)
    def _(event):
        """
        '!' opens the system prompt.
        """
        event.cli.push_focus(SYSTEM_BUFFER)
        get_vi_state(event.cli).input_mode = InputMode.INSERT
    @handle(Keys.Escape, filter=has_focus)
    @handle(Keys.ControlC, filter=has_focus)
    def _(event):
        """
        Cancel system prompt.
        """
        get_vi_state(event.cli).input_mode = InputMode.NAVIGATION
        event.cli.buffers[SYSTEM_BUFFER].reset()
        event.cli.pop_focus()
    @handle(Keys.ControlJ, filter=has_focus)
    def _(event):
        """
        Run system command.
        """
        get_vi_state(event.cli).input_mode = InputMode.NAVIGATION
        system_buffer = event.cli.buffers[SYSTEM_BUFFER]
        event.cli.run_system_command(system_buffer.text)
        system_buffer.reset(append_to_history=True)
        # Focus previous buffer again.
        event.cli.pop_focus()
def load_vi_search_bindings(registry, get_vi_state, get_search_state=None,
                            filter=None, search_buffer_name=SEARCH_BUFFER):
    """
    Vi bindings for incremental search: '/' and '?' open the search buffer
    (forward/backward), Control-J applies the search, and Escape,
    Control-C or Backspace (on an empty pattern) cancel it.
    """
    assert callable(get_vi_state)  # Callable that takes a CLI and returns a ViState.
    assert get_search_state is None or callable(get_search_state)
    if not get_search_state:
        def get_search_state(cli): return cli.search_state
    has_focus = filters.HasFocus(search_buffer_name)
    navigation_mode = ~has_focus & (ViStateFilter(get_vi_state, InputMode.NAVIGATION) | filters.HasSelection())
    handle = create_handle_decorator(registry, filter)
    @handle('/', filter=navigation_mode)
    @handle(Keys.ControlS, filter=~has_focus)
    def _(event):
        """
        Vi-style forward search.
        """
        # Set the ViState.
        get_search_state(event.cli).direction = IncrementalSearchDirection.FORWARD
        get_vi_state(event.cli).input_mode = InputMode.INSERT
        # Focus search buffer.
        event.cli.push_focus(search_buffer_name)
    @handle('?', filter=navigation_mode)
    @handle(Keys.ControlR, filter=~has_focus)
    def _(event):
        """
        Vi-style backward search.
        """
        # Set the ViState.
        get_search_state(event.cli).direction = IncrementalSearchDirection.BACKWARD
        # Focus search buffer.
        event.cli.push_focus(search_buffer_name)
        get_vi_state(event.cli).input_mode = InputMode.INSERT
    @handle(Keys.ControlJ, filter=has_focus)
    def _(event):
        """
        Apply the search. (At the / or ? prompt.)
        """
        input_buffer = event.cli.buffers.previous(event.cli)
        search_buffer = event.cli.buffers[search_buffer_name]
        # Update search state.
        if search_buffer.text:
            get_search_state(event.cli).text = search_buffer.text
        # Apply search.
        input_buffer.apply_search(get_search_state(event.cli))
        # Add query to history of search line.
        search_buffer.append_to_history()
        search_buffer.reset()
        # Focus previous document again.
        get_vi_state(event.cli).input_mode = InputMode.NAVIGATION
        event.cli.pop_focus()
    def search_buffer_is_empty(cli):
        """ Returns True when the search buffer is empty. """
        return cli.buffers[search_buffer_name].text == ''
    @handle(Keys.Escape, filter=has_focus)
    @handle(Keys.ControlC, filter=has_focus)
    @handle(Keys.Backspace, filter=has_focus & Condition(search_buffer_is_empty))
    def _(event):
        """
        Cancel search.
        """
        get_vi_state(event.cli).input_mode = InputMode.NAVIGATION
        event.cli.pop_focus()
        event.cli.buffers[search_buffer_name].reset()
def load_extra_vi_page_navigation_bindings(registry, filter=None):
    """
    Key bindings, for scrolling up and down through pages.
    This are separate bindings, because GNU readline doesn't have them.
    """
    handle = create_handle_decorator(registry, filter)

    # Key -> scrolling action, registered one by one below.
    bindings = [
        (Keys.ControlF, scroll_forward),
        (Keys.ControlB, scroll_backward),
        (Keys.ControlD, scroll_half_page_down),
        (Keys.ControlU, scroll_half_page_up),
        (Keys.ControlE, scroll_one_line_down),
        (Keys.ControlY, scroll_one_line_up),
        (Keys.PageDown, scroll_page_down),
        (Keys.PageUp, scroll_page_up),
    ]
    for key, action in bindings:
        handle(key)(action)
| 36.764234 | 171 | 0.632914 |
52e690bb1074d0e1aad6284160580eb96fd33d67 | 10,790 | py | Python | core/storage/opportunity/gae_models_test.py | Mystic-Slice/oppia | a0c63b07712a0cfb34a0cc5d4de8aaceeb709b9c | [
"Apache-2.0"
] | 3 | 2020-12-26T12:43:16.000Z | 2021-04-08T15:46:02.000Z | core/storage/opportunity/gae_models_test.py | Mystic-Slice/oppia | a0c63b07712a0cfb34a0cc5d4de8aaceeb709b9c | [
"Apache-2.0"
] | null | null | null | core/storage/opportunity/gae_models_test.py | Mystic-Slice/oppia | a0c63b07712a0cfb34a0cc5d4de8aaceeb709b9c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.opportunity.gae_models."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core import python_utils
from core.platform import models
from core.tests import test_utils
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import opportunity_models
(base_models, opportunity_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.opportunity])
class ExplorationOpportunitySummaryModelUnitTest(test_utils.GenericTestBase):
    """Test the ExplorationOpportunitySummaryModel class."""
    def setUp(self) -> None:
        super(ExplorationOpportunitySummaryModelUnitTest, self).setUp()
        # Two fixture opportunities. Both need 'hi' translations and an 'en'
        # voice artist, so the paginated queries below return both, in id
        # order.
        opportunity_models.ExplorationOpportunitySummaryModel(
            id='opportunity_id1',
            topic_id='topic_id1',
            topic_name='A topic',
            story_id='story_id1',
            story_title='A story title',
            chapter_title='A chapter title',
            content_count=20,
            incomplete_translation_language_codes=['hi', 'ar'],
            translation_counts={},
            language_codes_needing_voice_artists=['en'],
            language_codes_with_assigned_voice_artists=[]
        ).put()
        opportunity_models.ExplorationOpportunitySummaryModel(
            id='opportunity_id2',
            topic_id='topic_id2',
            topic_name='A new topic',
            story_id='story_id2',
            story_title='A new story title',
            chapter_title='A new chapter title',
            content_count=120,
            incomplete_translation_language_codes=['hi'],
            translation_counts={},
            language_codes_needing_voice_artists=['en'],
            language_codes_with_assigned_voice_artists=[]
        ).put()
    def test_get_deletion_policy(self) -> None:
        self.assertEqual(
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_deletion_policy(),
            base_models.DELETION_POLICY.NOT_APPLICABLE)
    def test_get_all_translation_opportunities(self) -> None:
        """A page size of 5 returns both 'hi' opportunities at once."""
        results, cursor, more = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_all_translation_opportunities(5, None, 'hi'))
        # Ruling out the possibility of None for mypy type checking.
        assert results is not None
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0].id, 'opportunity_id1')
        self.assertEqual(results[1].id, 'opportunity_id2')
        self.assertFalse(more)
        self.assertTrue(isinstance(cursor, python_utils.BASESTRING))
    def test_get_all_translation_opportunities_pagination(self) -> None:
        """A page size of 1 pages through the two results via the cursor."""
        results, cursor, more = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_all_translation_opportunities(1, None, 'hi'))
        # Ruling out the possibility of None for mypy type checking.
        assert results is not None
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, 'opportunity_id1')
        self.assertTrue(more)
        self.assertTrue(isinstance(cursor, python_utils.BASESTRING))
        results, new_cursor, more = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_all_translation_opportunities(1, cursor, 'hi'))
        # Ruling out the possibility of None for mypy type checking.
        assert results is not None
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, 'opportunity_id2')
        self.assertFalse(more)
        self.assertTrue(isinstance(new_cursor, python_utils.BASESTRING))
    def test_get_all_voiceover_opportunities(self) -> None:
        """A page size of 5 returns both 'en' voiceover opportunities."""
        results, cursor, more = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_all_voiceover_opportunities(5, None, 'en'))
        # Ruling out the possibility of None for mypy type checking.
        assert results is not None
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0].id, 'opportunity_id1')
        self.assertEqual(results[1].id, 'opportunity_id2')
        self.assertFalse(more)
        self.assertTrue(isinstance(cursor, python_utils.BASESTRING))
    def test_get_all_voiceover_opportunities_pagination(self) -> None:
        """A page size of 1 pages through both voiceover results."""
        results, cursor, more = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_all_voiceover_opportunities(1, None, 'en'))
        # Ruling out the possibility of None for mypy type checking.
        assert results is not None
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, 'opportunity_id1')
        self.assertTrue(more)
        self.assertTrue(isinstance(cursor, python_utils.BASESTRING))
        results, new_cursor, more = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_all_voiceover_opportunities(1, cursor, 'en'))
        # Ruling out the possibility of None for mypy type checking.
        assert results is not None
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, 'opportunity_id2')
        self.assertFalse(more)
        self.assertTrue(isinstance(new_cursor, python_utils.BASESTRING))
    def test_get_by_topic(self) -> None:
        """Each topic id maps to exactly one of the two fixtures."""
        model_list = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_by_topic('topic_id1'))
        # Ruling out the possibility of None for mypy type checking.
        assert model_list is not None
        self.assertEqual(len(model_list), 1)
        self.assertEqual(model_list[0].id, 'opportunity_id1')
        model_list = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_by_topic('topic_id2'))
        # Ruling out the possibility of None for mypy type checking.
        assert model_list is not None
        self.assertEqual(len(model_list), 1)
        self.assertEqual(model_list[0].id, 'opportunity_id2')
    def test_get_by_topic_for_non_existing_topic(self) -> None:
        """An unknown topic id yields an empty result list."""
        model_list = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_by_topic('non_existing_topic_id'))
        # Ruling out the possibility of None for mypy type checking.
        assert model_list is not None
        self.assertEqual(len(model_list), 0)
    def test_delete_all(self) -> None:
        """delete_all() leaves no translation opportunities behind."""
        results, _, more = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_all_translation_opportunities(1, None, 'hi'))
        # Ruling out the possibility of None for mypy type checking.
        assert results is not None
        self.assertEqual(len(results), 1)
        self.assertTrue(more)
        opportunity_models.ExplorationOpportunitySummaryModel.delete_all()
        results, _, more = (
            opportunity_models.ExplorationOpportunitySummaryModel
            .get_all_translation_opportunities(1, None, 'hi'))
        # Ruling out the possibility of None for mypy type checking.
        assert results is not None
        self.assertEqual(len(results), 0)
        self.assertFalse(more)
class SkillOpportunityModelTest(test_utils.GenericTestBase):
    """Tests for the SkillOpportunityModel class."""

    def setUp(self) -> None:
        super(SkillOpportunityModelTest, self).setUp()
        # Two skill opportunities for the paginated queries below.
        for opportunity_id, question_count in (
                ('opportunity_id1', 20), ('opportunity_id2', 30)):
            opportunity_models.SkillOpportunityModel(
                id=opportunity_id,
                skill_description='A skill description',
                question_count=question_count,
            ).put()

    def test_get_deletion_policy(self) -> None:
        self.assertEqual(
            opportunity_models.SkillOpportunityModel.get_deletion_policy(),
            base_models.DELETION_POLICY.NOT_APPLICABLE)

    def test_get_skill_opportunities(self) -> None:
        """A page size of 5 returns both opportunities in one page."""
        opportunities, cursor, more = (
            opportunity_models.SkillOpportunityModel
            .get_skill_opportunities(5, None))
        # Ruling out the possibility of None for mypy type checking.
        assert opportunities is not None
        self.assertEqual(
            [opportunity.id for opportunity in opportunities],
            ['opportunity_id1', 'opportunity_id2'])
        self.assertFalse(more)
        self.assertTrue(isinstance(cursor, python_utils.BASESTRING))

    def test_get_skill_opportunities_pagination(self) -> None:
        """A page size of 1 pages through both opportunities."""
        page, cursor, more = (
            opportunity_models.SkillOpportunityModel.get_skill_opportunities(
                1, None))
        # Ruling out the possibility of None for mypy type checking.
        assert page is not None
        self.assertEqual([opportunity.id for opportunity in page],
                         ['opportunity_id1'])
        self.assertTrue(more)
        self.assertTrue(isinstance(cursor, python_utils.BASESTRING))
        page, cursor, more = (
            opportunity_models.SkillOpportunityModel.get_skill_opportunities(
                1, cursor))
        # Ruling out the possibility of None for mypy type checking.
        assert page is not None
        self.assertEqual([opportunity.id for opportunity in page],
                         ['opportunity_id2'])
        self.assertFalse(more)
        self.assertTrue(isinstance(cursor, python_utils.BASESTRING))

    def test_delete_all_skill_opportunities(self) -> None:
        """delete_all() removes every skill opportunity."""
        page, _, more = (
            opportunity_models.SkillOpportunityModel.get_skill_opportunities(
                1, None))
        # Ruling out the possibility of None for mypy type checking.
        assert page is not None
        self.assertEqual(len(page), 1)
        self.assertTrue(more)
        opportunity_models.SkillOpportunityModel.delete_all()
        page, _, more = (
            opportunity_models.SkillOpportunityModel.get_skill_opportunities(
                1, None))
        # Ruling out the possibility of None for mypy type checking.
        assert page is not None
        self.assertEqual(len(page), 0)
        self.assertFalse(more)
| 41.821705 | 77 | 0.67924 |
b63ac9e263c2908ff59d6538a87e31e2ceee0c6e | 67,953 | py | Python | labelImg.py | christophdrayss/labelImg-pointer-upgrade | 9304d2c347abb935543579e14554aa74ec97807c | [
"MIT"
] | null | null | null | labelImg.py | christophdrayss/labelImg-pointer-upgrade | 9304d2c347abb935543579e14554aa74ec97807c | [
"MIT"
] | null | null | null | labelImg.py | christophdrayss/labelImg-pointer-upgrade | 9304d2c347abb935543579e14554aa74ec97807c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import codecs
import distutils.spawn
import os.path
import platform
import re
import sys
import subprocess
from functools import partial
from collections import defaultdict
import random
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.combobox import ComboBox
from libs.resources import *
from libs.constants import *
from libs.utils import *
from libs.settings import Settings
from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR
from libs.stringBundle import StringBundle
from libs.canvas import Canvas
from libs.zoomWidget import ZoomWidget
from libs.labelDialog import LabelDialog
from libs.colorDialog import ColorDialog
from libs.labelFile import LabelFile, LabelFileError, LabelFileFormat
from libs.toolBar import ToolBar
from libs.pascal_voc_io import PascalVocReader
from libs.pascal_voc_io import XML_EXT
from libs.yolo_io import YoloReader
from libs.yolo_io import TXT_EXT
from libs.create_ml_io import CreateMLReader
from libs.create_ml_io import JSON_EXT
from libs.ustr import ustr
from libs.hashableQListWidgetItem import HashableQListWidgetItem
# Application name, used for the window title and status-bar messages.
__appname__ = 'labelImg'
class WindowMixin(object):
    """Mixin adding menu/toolbar construction helpers to a QMainWindow."""

    def menu(self, title, actions=None):
        """Create a menu-bar menu named *title*, optionally pre-populated
        with *actions*, and return it."""
        new_menu = self.menuBar().addMenu(title)
        if actions:
            addActions(new_menu, actions)
        return new_menu

    def toolbar(self, title, actions=None):
        """Create a toolbar named *title* docked on the left, optionally
        pre-populated with *actions*, and return it."""
        bar = ToolBar(title)
        bar.setObjectName(u'%sToolBar' % title)
        # bar.setOrientation(Qt.Vertical)
        bar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        if actions:
            addActions(bar, actions)
        self.addToolBar(Qt.LeftToolBarArea, bar)
        return bar
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
    def __init__(self, defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
        """Build the main window: widgets, actions, menus, and restored state.

        Args:
            defaultFilename: image file or directory to open on startup.
            defaultPrefdefClassFile: path to the predefined-classes file.
            defaultSaveDir: directory where annotations are written.
        """
        super(MainWindow, self).__init__()
        self.setWindowTitle(__appname__)
        # Load setting in the main thread
        self.settings = Settings()
        self.settings.load()
        settings = self.settings
        # Load string bundle for i18n
        self.stringBundle = StringBundle.getBundle()
        getStr = lambda strId: self.stringBundle.getString(strId)
        # Save as Pascal voc xml
        self.defaultSaveDir = defaultSaveDir
        self.labelFileFormat = settings.get(SETTING_LABEL_FILE_FORMAT, LabelFileFormat.PASCAL_VOC)
        # For loading all image under a directory
        self.mImgList = []
        self.dirname = None
        self.labelHist = []
        self.lastOpenDir = None
        # Whether we need to save or not.
        self.dirty = False
        self._noSelectionSlot = False
        self._beginner = True
        self.screencastViewer = self.getAvailableScreencastViewer()
        self.screencast = "https://youtu.be/p0nR2YsCY_U"
        # Load predefined classes to the list
        self.loadPredefinedClasses(defaultPrefdefClassFile)
        # Main widgets and related state.
        self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)
        # Bidirectional item <-> shape maps kept in sync by add/remLabel.
        self.itemsToShapes = {}
        self.shapesToItems = {}
        self.prevLabelText = ''
        listLayout = QVBoxLayout()
        listLayout.setContentsMargins(0, 0, 0, 0)
        # Create a widget for using default label
        self.useDefaultLabelCheckbox = QCheckBox(getStr('useDefaultLabel'))
        self.useDefaultLabelCheckbox.setChecked(False)
        self.defaultLabelTextLine = QLineEdit()
        useDefaultLabelQHBoxLayout = QHBoxLayout()
        useDefaultLabelQHBoxLayout.addWidget(self.useDefaultLabelCheckbox)
        useDefaultLabelQHBoxLayout.addWidget(self.defaultLabelTextLine)
        useDefaultLabelContainer = QWidget()
        useDefaultLabelContainer.setLayout(useDefaultLabelQHBoxLayout)
        # Create a widget for edit and diffc button
        self.diffcButton = QCheckBox(getStr('useDifficult'))
        self.diffcButton.setChecked(False)
        self.diffcButton.stateChanged.connect(self.btnstate)
        self.editButton = QToolButton()
        self.editButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        # Create a widget for Auto-Point
        self.autoPointCheckbox = QCheckBox(getStr('autoPoint'))
        self.autoPointCheckbox.setChecked(False)
        autoPointQHBoxLayout = QHBoxLayout()
        autoPointQHBoxLayout.addWidget(self.autoPointCheckbox)
        autoPointContainer = QWidget()
        autoPointContainer.setLayout(autoPointQHBoxLayout)
        # Create a widget for Auto-Bbox
        self.autoBboxCheckbox = QCheckBox(getStr('autoBbox'))
        self.autoBboxCheckbox.setChecked(False)
        autoBboxQHBoxLayout = QHBoxLayout()
        autoBboxQHBoxLayout.addWidget(self.autoBboxCheckbox)
        autoBboxContainer = QWidget()
        autoBboxContainer.setLayout(autoBboxQHBoxLayout)
        # Add some of widgets to listLayout
        listLayout.addWidget(self.editButton)
        listLayout.addWidget(self.diffcButton)
        listLayout.addWidget(useDefaultLabelContainer)
        listLayout.addWidget(autoPointContainer)
        listLayout.addWidget(autoBboxContainer)
        # Create and add combobox for showing unique labels in group
        self.comboBox = ComboBox(self)
        listLayout.addWidget(self.comboBox)
        # Create and add a widget for showing current label items
        self.labelList = QListWidget()
        labelListContainer = QWidget()
        labelListContainer.setLayout(listLayout)
        self.labelList.itemActivated.connect(self.labelSelectionChanged)
        self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
        self.labelList.itemDoubleClicked.connect(self.editLabel)
        # Connect to itemChanged to detect checkbox changes.
        self.labelList.itemChanged.connect(self.labelItemChanged)
        listLayout.addWidget(self.labelList)
        self.dock = QDockWidget(getStr('boxLabelText'), self)
        self.dock.setObjectName(getStr('labels'))
        self.dock.setWidget(labelListContainer)
        self.fileListWidget = QListWidget()
        self.fileListWidget.itemDoubleClicked.connect(self.fileitemDoubleClicked)
        filelistLayout = QVBoxLayout()
        filelistLayout.setContentsMargins(0, 0, 0, 0)
        filelistLayout.addWidget(self.fileListWidget)
        fileListContainer = QWidget()
        fileListContainer.setLayout(filelistLayout)
        self.filedock = QDockWidget(getStr('fileList'), self)
        self.filedock.setObjectName(getStr('files'))
        self.filedock.setWidget(fileListContainer)
        self.zoomWidget = ZoomWidget()
        self.colorDialog = ColorDialog(parent=self)
        self.canvas = Canvas(parent=self)
        self.canvas.zoomRequest.connect(self.zoomRequest)
        self.canvas.setDrawingShapeToSquare(settings.get(SETTING_DRAW_SQUARE, False))
        self.canvas.setDrawingShapeToPoint(settings.get(SETTING_DRAW_POINT, False))
        scroll = QScrollArea()
        scroll.setWidget(self.canvas)
        scroll.setWidgetResizable(True)
        self.scrollBars = {
            Qt.Vertical: scroll.verticalScrollBar(),
            Qt.Horizontal: scroll.horizontalScrollBar()
        }
        self.scrollArea = scroll
        self.canvas.scrollRequest.connect(self.scrollRequest)
        self.canvas.newShape.connect(self.newShape)
        self.canvas.shapeMoved.connect(self.setDirty)
        self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
        self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
        self.setCentralWidget(scroll)
        self.addDockWidget(Qt.RightDockWidgetArea, self.dock)
        self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
        self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
        self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
        self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
        # Actions
        action = partial(newAction, self)
        quit = action(getStr('quit'), self.close,
                      'Ctrl+Q', 'quit', getStr('quitApp'))
        open = action(getStr('openFile'), self.openFile,
                      'Ctrl+O', 'open', getStr('openFileDetail'))
        opendir = action(getStr('openDir'), self.openDirDialog,
                         'Ctrl+u', 'open', getStr('openDir'))
        copyPrevBounding = action(getStr('copyPrevBounding'), self.copyPreviousBoundingBoxes,
                                  'Ctrl+v', 'paste', getStr('copyPrevBounding'))
        changeSavedir = action(getStr('changeSaveDir'), self.changeSavedirDialog,
                               'Ctrl+r', 'open', getStr('changeSavedAnnotationDir'))
        openAnnotation = action(getStr('openAnnotation'), self.openAnnotationDialog,
                                'Ctrl+Shift+O', 'open', getStr('openAnnotationDetail'))
        openNextImg = action(getStr('nextImg'), self.openNextImg,
                             'd', 'next', getStr('nextImgDetail'))
        openPrevImg = action(getStr('prevImg'), self.openPrevImg,
                             'a', 'prev', getStr('prevImgDetail'))
        verify = action(getStr('verifyImg'), self.verifyImg,
                        'space', 'verify', getStr('verifyImgDetail'))
        save = action(getStr('save'), self.saveFile,
                      'Ctrl+S', 'save', getStr('saveDetail'), enabled=False)

        def getFormatMeta(format):
            """
            returns a tuple containing (title, icon_name) of the selected format
            """
            if format == LabelFileFormat.PASCAL_VOC:
                return ('&PascalVOC', 'format_voc')
            elif format == LabelFileFormat.YOLO:
                return ('&YOLO', 'format_yolo')
            elif format == LabelFileFormat.CREATE_ML:
                return ('&CreateML', 'format_createml')

        save_format = action(getFormatMeta(self.labelFileFormat)[0],
                             self.change_format, 'Ctrl+',
                             getFormatMeta(self.labelFileFormat)[1],
                             getStr('changeSaveFormat'), enabled=True)
        saveAs = action(getStr('saveAs'), self.saveFileAs,
                        'Ctrl+Shift+S', 'save-as', getStr('saveAsDetail'), enabled=False)
        close = action(getStr('closeCur'), self.closeFile, 'Ctrl+W', 'close', getStr('closeCurDetail'))
        deleteImg = action(getStr('deleteImg'), self.deleteImg, 'k', 'close', getStr('deleteImgDetail'))
        resetAll = action(getStr('resetAll'), self.resetAll, None, 'resetall', getStr('resetAllDetail'))
        color1 = action(getStr('boxLineColor'), self.chooseColor1,
                        'Ctrl+L', 'color_line', getStr('boxLineColorDetail'))
        createMode = action(getStr('crtBox'), self.setCreateMode,
                            'w', 'new', getStr('crtBoxDetail'), enabled=False)
        editMode = action('&Edit\nRectBox', self.setEditMode,
                          'Ctrl+J', 'edit', u'Move and edit Boxs', enabled=False)
        create = action(getStr('crtBox'), self.createShape,
                        'w', 'new', getStr('crtBoxDetail'), enabled=False)
        createPoint = action(getStr('crtPoint'), self.createPoint,
                             'e', 'newPoint', getStr('crtBoxDetail'), enabled=False)
        delete = action(getStr('delBox'), self.deleteSelectedShape,
                        'Delete', 'delete', getStr('delBoxDetail'), enabled=False)
        copy = action(getStr('dupBox'), self.copySelectedShape,
                      'Ctrl+D', 'copy', getStr('dupBoxDetail'),
                      enabled=False)
        advancedMode = action(getStr('advancedMode'), self.toggleAdvancedMode,
                              'Ctrl+Shift+A', 'expert', getStr('advancedModeDetail'),
                              checkable=True)
        hideAll = action('&Hide\nRectBox', partial(self.togglePolygons, False),
                         'Ctrl+H', 'hide', getStr('hideAllBoxDetail'),
                         enabled=False)
        showAll = action('&Show\nRectBox', partial(self.togglePolygons, True),
                         'Ctrl+A', 'hide', getStr('showAllBoxDetail'),
                         enabled=False)
        clearCursor = action('Clear\nCursor', partial(self.clearCursor, True),
                             'q', 'hide', getStr('clearCursor'),
                             enabled=False)
        help = action(getStr('tutorial'), self.showTutorialDialog, None, 'help', getStr('tutorialDetail'))
        showInfo = action(getStr('info'), self.showInfoDialog, None, 'help', getStr('info'))
        zoom = QWidgetAction(self)
        zoom.setDefaultWidget(self.zoomWidget)
        self.zoomWidget.setWhatsThis(
            u"Zoom in or out of the image. Also accessible with"
            " %s and %s from the canvas." % (fmtShortcut("Ctrl+[-+]"),
                                             fmtShortcut("Ctrl+Wheel")))
        self.zoomWidget.setEnabled(False)
        zoomIn = action(getStr('zoomin'), partial(self.addZoom, 10),
                        'Ctrl++', 'zoom-in', getStr('zoominDetail'), enabled=False)
        zoomOut = action(getStr('zoomout'), partial(self.addZoom, -10),
                         'Ctrl+-', 'zoom-out', getStr('zoomoutDetail'), enabled=False)
        zoomOrg = action(getStr('originalsize'), partial(self.setZoom, 100),
                         'Ctrl+=', 'zoom', getStr('originalsizeDetail'), enabled=False)
        fitWindow = action(getStr('fitWin'), self.setFitWindow,
                           'Ctrl+F', 'fit-window', getStr('fitWinDetail'),
                           checkable=True, enabled=False)
        fitWidth = action(getStr('fitWidth'), self.setFitWidth,
                          'Ctrl+Shift+F', 'fit-width', getStr('fitWidthDetail'),
                          checkable=True, enabled=False)
        # Group zoom controls into a list for easier toggling.
        zoomActions = (self.zoomWidget, zoomIn, zoomOut,
                       zoomOrg, fitWindow, fitWidth)
        self.zoomMode = self.MANUAL_ZOOM
        self.scalers = {
            self.FIT_WINDOW: self.scaleFitWindow,
            self.FIT_WIDTH: self.scaleFitWidth,
            # Set to one to scale to 100% when loading files.
            self.MANUAL_ZOOM: lambda: 1,
        }
        edit = action(getStr('editLabel'), self.editLabel,
                      'Ctrl+E', 'edit', getStr('editLabelDetail'),
                      enabled=False)
        self.editButton.setDefaultAction(edit)
        shapeLineColor = action(getStr('shapeLineColor'), self.chshapeLineColor,
                                icon='color_line', tip=getStr('shapeLineColorDetail'),
                                enabled=False)
        shapeFillColor = action(getStr('shapeFillColor'), self.chshapeFillColor,
                                icon='color', tip=getStr('shapeFillColorDetail'),
                                enabled=False)
        labels = self.dock.toggleViewAction()
        labels.setText(getStr('showHide'))
        labels.setShortcut('Ctrl+Shift+L')
        # Label list context menu.
        labelMenu = QMenu()
        addActions(labelMenu, (edit, delete))
        self.labelList.setContextMenuPolicy(Qt.CustomContextMenu)
        self.labelList.customContextMenuRequested.connect(
            self.popLabelListMenu)
        # Draw squares/rectangles
        self.drawSquaresOption = QAction('Draw Squares', self)
        self.drawSquaresOption.setShortcut('Ctrl+Shift+R')
        self.drawSquaresOption.setCheckable(True)
        self.drawSquaresOption.setChecked(settings.get(SETTING_DRAW_SQUARE, False))
        self.drawSquaresOption.triggered.connect(self.toogleDrawSquare)
        # Store actions for further handling.
        self.actions = struct(save=save, save_format=save_format, saveAs=saveAs, open=open, close=close, resetAll = resetAll, deleteImg = deleteImg,
                              lineColor=color1, create=create, createPoint=createPoint, delete=delete, edit=edit, copy=copy,
                              createMode=createMode, editMode=editMode, advancedMode=advancedMode,
                              shapeLineColor=shapeLineColor, shapeFillColor=shapeFillColor,
                              zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
                              fitWindow=fitWindow, fitWidth=fitWidth,
                              zoomActions=zoomActions,
                              fileMenuActions=(
                                  open, opendir, save, saveAs, close, resetAll, quit),
                              beginner=(), advanced=(),
                              editMenu=(edit, copy, delete,
                                        None, color1, self.drawSquaresOption),
                              beginnerContext=(create, createPoint, edit, copy, delete),
                              advancedContext=(createMode, editMode, edit, copy,
                                               delete, shapeLineColor, shapeFillColor),
                              onLoadActive=(
                                  close, create, createPoint, createMode, editMode),
                              onShapesPresent=(saveAs, hideAll, showAll))
        self.menus = struct(
            file=self.menu(getStr('menu_file')),
            edit=self.menu(getStr('menu_edit')),
            view=self.menu(getStr('menu_view')),
            help=self.menu(getStr('menu_help')),
            recentFiles=QMenu(getStr('menu_openRecent')),
            labelList=labelMenu)
        # Auto saving : Enable auto saving if pressing next
        self.autoSaving = QAction(getStr('autoSaveMode'), self)
        self.autoSaving.setCheckable(True)
        self.autoSaving.setChecked(settings.get(SETTING_AUTO_SAVE, False))
        # Sync single class mode from PR#106
        self.singleClassMode = QAction(getStr('singleClsMode'), self)
        self.singleClassMode.setShortcut("Ctrl+Shift+S")
        self.singleClassMode.setCheckable(True)
        self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
        self.lastLabel = None
        # Shuffle images : Enable shuffle when importing images
        self.shuffleMode = QAction(getStr('shuffleMode'), self)
        self.shuffleMode.setCheckable(True)
        self.shuffleMode.setShortcut("Ctrl+Shift+U")
        self.shuffleMode.setChecked(settings.get(SETTING_SHUFFLE_MODE, False))
        # Add option to enable/disable labels being displayed at the top of bounding boxes
        self.displayLabelOption = QAction(getStr('displayLabel'), self)
        self.displayLabelOption.setShortcut("Ctrl+Shift+P")
        self.displayLabelOption.setCheckable(True)
        self.displayLabelOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
        self.displayLabelOption.triggered.connect(self.togglePaintLabelsOption)
        addActions(self.menus.file,
                   (open, opendir, copyPrevBounding, changeSavedir, openAnnotation, self.menus.recentFiles, save, save_format, saveAs, close, resetAll, deleteImg, quit))
        addActions(self.menus.help, (help, showInfo))
        addActions(self.menus.view, (
            self.autoSaving,
            self.singleClassMode,
            self.shuffleMode,
            self.displayLabelOption,
            labels, advancedMode, None,
            hideAll, showAll, None,
            zoomIn, zoomOut, zoomOrg, None,
            fitWindow, fitWidth))
        self.menus.file.aboutToShow.connect(self.updateFileMenu)
        # Custom context menu for the canvas widget:
        addActions(self.canvas.menus[0], self.actions.beginnerContext)
        addActions(self.canvas.menus[1], (
            action('&Copy here', self.copyShape),
            action('&Move here', self.moveShape)))
        self.tools = self.toolbar('Tools')
        self.actions.beginner = (
            open, opendir, changeSavedir, openNextImg, openPrevImg, verify, save, save_format, None, create,createPoint, copy, delete, None,
            zoomIn, zoom, zoomOut, fitWindow, fitWidth)
        self.actions.advanced = (
            open, opendir, changeSavedir, openNextImg, openPrevImg, save, save_format, None,
            createMode, editMode, None,
            hideAll, showAll)
        self.statusBar().showMessage('%s started.' % __appname__)
        self.statusBar().show()
        # Application state.
        self.image = QImage()
        self.filePath = ustr(defaultFilename)
        self.lastOpenDir= None
        self.recentFiles = []
        self.maxRecent = 7
        self.lineColor = None
        self.fillColor = None
        self.zoom_level = 100
        self.fit_window = False
        # Add Chris
        self.difficult = False
        ## Fix the compatible issue for qt4 and qt5. Convert the QStringList to python list
        if settings.get(SETTING_RECENT_FILES):
            if have_qstring():
                recentFileQStringList = settings.get(SETTING_RECENT_FILES)
                self.recentFiles = [ustr(i) for i in recentFileQStringList]
            else:
                self.recentFiles = recentFileQStringList = settings.get(SETTING_RECENT_FILES)
        size = settings.get(SETTING_WIN_SIZE, QSize(600, 500))
        position = QPoint(0, 0)
        saved_position = settings.get(SETTING_WIN_POSE, position)
        # Fix the multiple monitors issue
        for i in range(QApplication.desktop().screenCount()):
            if QApplication.desktop().availableGeometry(i).contains(saved_position):
                position = saved_position
                break
        self.resize(size)
        self.move(position)
        saveDir = ustr(settings.get(SETTING_SAVE_DIR, None))
        self.lastOpenDir = ustr(settings.get(SETTING_LAST_OPEN_DIR, None))
        if self.defaultSaveDir is None and saveDir is not None and os.path.exists(saveDir):
            self.defaultSaveDir = saveDir
            self.statusBar().showMessage('%s started. Annotation will be saved to %s' %
                                         (__appname__, self.defaultSaveDir))
            self.statusBar().show()
        self.restoreState(settings.get(SETTING_WIN_STATE, QByteArray()))
        Shape.line_color = self.lineColor = QColor(settings.get(SETTING_LINE_COLOR, DEFAULT_LINE_COLOR))
        Shape.fill_color = self.fillColor = QColor(settings.get(SETTING_FILL_COLOR, DEFAULT_FILL_COLOR))
        self.canvas.setDrawingColor(self.lineColor)
        # Add chris
        Shape.difficult = self.difficult

        def xbool(x):
            # Normalize QVariant (qt4) or plain Python values to bool.
            if isinstance(x, QVariant):
                return x.toBool()
            return bool(x)

        if xbool(settings.get(SETTING_ADVANCE_MODE, False)):
            self.actions.advancedMode.setChecked(True)
            self.toggleAdvancedMode()
        # Populate the File menu dynamically.
        self.updateFileMenu()
        # Since loading the file may take some time, make sure it runs in the background.
        if self.filePath and os.path.isdir(self.filePath):
            self.queueEvent(partial(self.importDirImages, self.filePath or ""))
        elif self.filePath:
            self.queueEvent(partial(self.loadFile, self.filePath or ""))
        # Callbacks:
        self.zoomWidget.valueChanged.connect(self.paintCanvas)
        self.populateModeActions()
        # Display cursor coordinates at the right of status bar
        self.labelCoordinates = QLabel('')
        self.statusBar().addPermanentWidget(self.labelCoordinates)
        # Open Dir if default file
        if self.filePath and os.path.isdir(self.filePath):
            self.openDirDialog(dirpath=self.filePath, silent=True)
def keyReleaseEvent(self, event):
if event.key() == Qt.Key_Control:
self.canvas.setDrawingShapeToSquare(False)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Control:
# Draw rectangle if Ctrl is pressed
self.canvas.setDrawingShapeToSquare(True)
## Support Functions ##
def set_format(self, save_format):
if save_format == FORMAT_PASCALVOC:
self.actions.save_format.setText(FORMAT_PASCALVOC)
self.actions.save_format.setIcon(newIcon("format_voc"))
self.labelFileFormat = LabelFileFormat.PASCAL_VOC
LabelFile.suffix = XML_EXT
elif save_format == FORMAT_YOLO:
self.actions.save_format.setText(FORMAT_YOLO)
self.actions.save_format.setIcon(newIcon("format_yolo"))
self.labelFileFormat = LabelFileFormat.YOLO
LabelFile.suffix = TXT_EXT
elif save_format == FORMAT_CREATEML:
self.actions.save_format.setText(FORMAT_CREATEML)
self.actions.save_format.setIcon(newIcon("format_createml"))
self.labelFileFormat = LabelFileFormat.CREATE_ML
LabelFile.suffix = JSON_EXT
def change_format(self):
if self.labelFileFormat == LabelFileFormat.PASCAL_VOC:
self.set_format(FORMAT_YOLO)
elif self.labelFileFormat == LabelFileFormat.YOLO:
self.set_format(FORMAT_CREATEML)
elif self.labelFileFormat == LabelFileFormat.CREATE_ML:
self.set_format(FORMAT_PASCALVOC)
else:
raise ValueError('Unknown label file format.')
self.setDirty()
def noShapes(self):
return not self.itemsToShapes
def toggleAdvancedMode(self, value=True):
self._beginner = not value
self.canvas.setEditingPolygon(True)
self.populateModeActions()
self.editButton.setVisible(not value)
if value:
self.actions.createMode.setEnabled(True)
self.actions.editMode.setEnabled(False)
self.dock.setFeatures(self.dock.features() | self.dockFeatures)
else:
self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
def populateModeActions(self):
if self.beginner():
tool, menu = self.actions.beginner, self.actions.beginnerContext
else:
tool, menu = self.actions.advanced, self.actions.advancedContext
self.tools.clear()
addActions(self.tools, tool)
self.canvas.menus[0].clear()
addActions(self.canvas.menus[0], menu)
self.menus.edit.clear()
actions = (self.actions.create,) if self.beginner()\
else (self.actions.createMode, self.actions.editMode)
addActions(self.menus.edit, actions + self.actions.editMenu)
    def setBeginner(self):
        """Repopulate the toolbar with the beginner-mode action set."""
        self.tools.clear()
        addActions(self.tools, self.actions.beginner)
    def setAdvanced(self):
        """Repopulate the toolbar with the advanced-mode action set."""
        self.tools.clear()
        addActions(self.tools, self.actions.advanced)
    def setDirty(self):
        """Mark the current annotation as modified and enable Save."""
        self.dirty = True
        self.actions.save.setEnabled(True)
    def setClean(self):
        """Mark the annotation as saved; re-enable the create actions."""
        self.dirty = False
        self.actions.save.setEnabled(False)
        self.actions.create.setEnabled(True)
        self.actions.createPoint.setEnabled(True)
def toggleActions(self, value=True):
"""Enable/Disable widgets which depend on an opened image."""
for z in self.actions.zoomActions:
z.setEnabled(value)
for action in self.actions.onLoadActive:
action.setEnabled(value)
    def queueEvent(self, function):
        """Schedule *function* to run on the next event-loop iteration."""
        QTimer.singleShot(0, function)
    def status(self, message, delay=5000):
        """Show *message* in the status bar for *delay* milliseconds."""
        self.statusBar().showMessage(message, delay)
    def resetState(self):
        """Clear all per-image state (shapes, lists, canvas) before a load."""
        self.itemsToShapes.clear()
        self.shapesToItems.clear()
        self.labelList.clear()
        self.filePath = None
        self.imageData = None
        self.labelFile = None
        self.canvas.resetState()
        self.labelCoordinates.clear()
        self.comboBox.cb.clear()
def currentItem(self):
items = self.labelList.selectedItems()
if items:
return items[0]
return None
def addRecentFile(self, filePath):
if filePath in self.recentFiles:
self.recentFiles.remove(filePath)
elif len(self.recentFiles) >= self.maxRecent:
self.recentFiles.pop()
self.recentFiles.insert(0, filePath)
    def beginner(self):
        """Return True when the UI is in beginner mode."""
        return self._beginner
    def advanced(self):
        """Return True when the UI is in advanced mode."""
        return not self.beginner()
def getAvailableScreencastViewer(self):
osName = platform.system()
if osName == 'Windows':
return ['C:\\Program Files\\Internet Explorer\\iexplore.exe']
elif osName == 'Linux':
return ['xdg-open']
elif osName == 'Darwin':
return ['open']
## Callbacks ##
    def showTutorialDialog(self):
        """Open the tutorial screencast URL in the platform's viewer."""
        subprocess.Popen(self.screencastViewer + [self.screencast])
    def showInfoDialog(self):
        """Show an About box with the app name, version and Python version."""
        from libs.__init__ import __version__
        msg = u'Name:{0} \nApp Version:{1} \n{2} '.format(__appname__, __version__, sys.version_info)
        QMessageBox.information(self, u'Information', msg)
# new point feature
    # new point feature
    def createPoint(self):
        """Enter point-drawing mode on the canvas (beginner mode only)."""
        assert self.beginner()
        self.canvas.setEditingPoint(False)
        self.actions.createPoint.setEnabled(False)
    # end new point feature
    def createShape(self):
        """Enter box-drawing mode on the canvas (beginner mode only)."""
        assert self.beginner()
        self.canvas.setEditingPolygon(False)
        self.actions.create.setEnabled(False)
        self.actions.createPoint.setEnabled(False)
def toggleDrawingSensitive(self, drawing=True):
"""In the middle of drawing, toggling between modes should be disabled."""
self.actions.editMode.setEnabled(not drawing)
if not drawing and self.beginner():
# Cancel creation.
print('Cancel creation.')
self.canvas.setEditingPolygon(True)
self.canvas.restoreCursor()
self.actions.create.setEnabled(True)
self.actions.createPoint.setEnabled(True)
    def toggleDrawMode(self, edit=True):
        """Put the canvas in edit mode (True) or create mode (False)."""
        self.canvas.setEditingPolygon(edit)
        self.actions.createMode.setEnabled(edit)
        self.actions.editMode.setEnabled(not edit)
    def setCreateMode(self):
        """Switch the canvas to create mode (advanced mode only)."""
        assert self.advanced()
        self.toggleDrawMode(False)
    def setEditMode(self):
        """Switch the canvas to edit mode (advanced mode only)."""
        assert self.advanced()
        self.toggleDrawMode(True)
        self.labelSelectionChanged()
def updateFileMenu(self):
currFilePath = self.filePath
def exists(filename):
return os.path.exists(filename)
menu = self.menus.recentFiles
menu.clear()
files = [f for f in self.recentFiles if f !=
currFilePath and exists(f)]
for i, f in enumerate(files):
icon = newIcon('labels')
action = QAction(
icon, '&%d %s' % (i + 1, QFileInfo(f).fileName()), self)
action.triggered.connect(partial(self.loadRecent, f))
menu.addAction(action)
    def popLabelListMenu(self, point):
        """Show the label-list context menu at *point* (widget coordinates)."""
        self.menus.labelList.exec_(self.labelList.mapToGlobal(point))
def editLabel(self):
if not self.canvas.isEditingPolygon():
return
item = self.currentItem()
if not item:
return
text = self.labelDialog.popUp(item.text())
if text is not None:
item.setText(text)
item.setBackground(generateColorByText(text))
self.setDirty()
self.updateComboBox()
# Tzutalin 20160906 : Add file list and dock to move faster
def fileitemDoubleClicked(self, item=None):
currIndex = self.mImgList.index(ustr(item.text()))
if currIndex < len(self.mImgList):
filename = self.mImgList[currIndex]
if filename:
self.loadFile(filename)
# Add chris
def btnstate(self, item= None):
""" Function to handle difficult examples
Update on each object """
if not self.canvas.isEditingPolygon():
return
item = self.currentItem()
if not item: # If not selected Item, take the first one
item = self.labelList.item(self.labelList.count()-1)
difficult = self.diffcButton.isChecked()
try:
shape = self.itemsToShapes[item]
except:
pass
# Checked and Update
try:
if difficult != shape.difficult:
shape.difficult = difficult
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
except:
pass
# React to canvas signals.
def shapeSelectionChanged(self, selected=False):
if self._noSelectionSlot:
self._noSelectionSlot = False
else:
shape = self.canvas.selectedShape
if shape:
self.shapesToItems[shape].setSelected(True)
else:
self.labelList.clearSelection()
self.actions.delete.setEnabled(selected)
self.actions.copy.setEnabled(selected)
self.actions.edit.setEnabled(selected)
self.actions.shapeLineColor.setEnabled(selected)
self.actions.shapeFillColor.setEnabled(selected)
def addLabel(self, shape):
shape.paintLabel = self.displayLabelOption.isChecked()
item = HashableQListWidgetItem(shape.label)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Checked)
item.setBackground(generateColorByText(shape.label))
self.itemsToShapes[item] = shape
self.shapesToItems[shape] = item
self.labelList.addItem(item)
for action in self.actions.onShapesPresent:
action.setEnabled(True)
self.updateComboBox()
def remLabel(self, shape):
if shape is None:
# print('rm empty label')
return
item = self.shapesToItems[shape]
self.labelList.takeItem(self.labelList.row(item))
del self.shapesToItems[shape]
del self.itemsToShapes[item]
self.updateComboBox()
    def loadLabels(self, shapes):
        """Create Shape objects from parsed annotation tuples and show them.

        Each element of *shapes* is a
        (label, points, line_color, fill_color, difficult) tuple as produced
        by the annotation readers.
        """
        s = []
        for label, points, line_color, fill_color, difficult in shapes:
            shape = Shape(label=label)
            for x, y in points:
                # Ensure the labels are within the bounds of the image. If not, fix them.
                x, y, snapped = self.canvas.snapPointToCanvas(x, y)
                if snapped:
                    # Snapping changed the stored geometry, so a re-save is needed.
                    self.setDirty()
                shape.addPoint(QPointF(x, y))
            shape.difficult = difficult
            shape.close()
            s.append(shape)
            if line_color:
                shape.line_color = QColor(*line_color)
            else:
                # No stored color: derive a deterministic one from the label text.
                shape.line_color = generateColorByText(label)
            if fill_color:
                shape.fill_color = QColor(*fill_color)
            else:
                shape.fill_color = generateColorByText(label)
            self.addLabel(shape)
        self.updateComboBox()
        self.canvas.loadShapes(s)
def updateComboBox(self):
# Get the unique labels and add them to the Combobox.
itemsTextList = [str(self.labelList.item(i).text()) for i in range(self.labelList.count())]
uniqueTextList = list(set(itemsTextList))
# Add a null row for showing all the labels
uniqueTextList.append("")
uniqueTextList.sort()
self.comboBox.update_items(uniqueTextList)
    def saveLabels(self, annotationFilePath):
        """Write the current canvas shapes to *annotationFilePath*.

        The on-disk format (and enforced file suffix) follows
        self.labelFileFormat. Returns True on success, False when the
        LabelFile backend reports a LabelFileError.
        """
        annotationFilePath = ustr(annotationFilePath)
        if self.labelFile is None:
            self.labelFile = LabelFile()
            self.labelFile.verified = self.canvas.verified

        def format_shape(s):
            # Serialize one Shape into the dict layout LabelFile expects.
            return dict(label=s.label,
                        line_color=s.line_color.getRgb(),
                        fill_color=s.fill_color.getRgb(),
                        points=[(p.x(), p.y()) for p in s.points],
                        # add chris
                        difficult = s.difficult)

        shapes = [format_shape(shape) for shape in self.canvas.shapes]
        # Can add different annotation formats here
        try:
            if self.labelFileFormat == LabelFileFormat.PASCAL_VOC:
                if annotationFilePath[-4:].lower() != ".xml":
                    annotationFilePath += XML_EXT
                self.labelFile.savePascalVocFormat(annotationFilePath, shapes, self.filePath, self.imageData,
                                                   self.lineColor.getRgb(), self.fillColor.getRgb())
            elif self.labelFileFormat == LabelFileFormat.YOLO:
                if annotationFilePath[-4:].lower() != ".txt":
                    annotationFilePath += TXT_EXT
                self.labelFile.saveYoloFormat(annotationFilePath, shapes, self.filePath, self.imageData, self.labelHist,
                                              self.lineColor.getRgb(), self.fillColor.getRgb())
            elif self.labelFileFormat == LabelFileFormat.CREATE_ML:
                if annotationFilePath[-5:].lower() != ".json":
                    annotationFilePath += JSON_EXT
                self.labelFile.saveCreateMLFormat(annotationFilePath, shapes, self.filePath, self.imageData,
                                                  self.labelHist, self.lineColor.getRgb(), self.fillColor.getRgb())
            else:
                self.labelFile.save(annotationFilePath, shapes, self.filePath, self.imageData,
                                    self.lineColor.getRgb(), self.fillColor.getRgb())
            print('Image:{0} -> Annotation:{1}'.format(self.filePath, annotationFilePath))
            return True
        except LabelFileError as e:
            self.errorMessage(u'Error saving label data', u'<b>%s</b>' % e)
            return False
def copySelectedShape(self):
self.addLabel(self.canvas.copySelectedShape())
# fix copy and delete
self.shapeSelectionChanged(True)
def comboSelectionChanged(self, index):
    """Filter the label list by the label text chosen in the combo box.

    An empty selection shows every item; otherwise only items whose text
    matches the chosen label stay checked (visible).
    """
    text = self.comboBox.cb.itemText(index)
    for i in range(self.labelList.count()):
        item = self.labelList.item(i)
        # Use the named Qt constants instead of the magic values 2/0,
        # consistent with the Qt.Checked usage elsewhere in this class.
        if text == "" or text == item.text():
            item.setCheckState(Qt.Checked)
        else:
            item.setCheckState(Qt.Unchecked)
def labelSelectionChanged(self):
item = self.currentItem()
if item and self.canvas.isEditingPolygon():
self._noSelectionSlot = True
self.canvas.selectShape(self.itemsToShapes[item])
shape = self.itemsToShapes[item]
# Add Chris
self.diffcButton.setChecked(shape.difficult)
def labelItemChanged(self, item):
shape = self.itemsToShapes[item]
label = item.text()
if label != shape.label:
shape.label = item.text()
shape.line_color = generateColorByText(shape.label)
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# Callback functions:
def newShape(self):
    """Pop-up and give focus to the label editor.
    position MUST be in global coordinates.

    Called by the canvas when a new shape has been drawn; attaches a label
    to it (from the dialog, single-class mode, or the configured default)
    or discards the in-progress shape when the dialog is cancelled.
    """
    if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text():
        if len(self.labelHist) > 0:
            self.labelDialog = LabelDialog(
                parent=self, listItem=self.labelHist)

        # Sync single class mode from PR#106
        if self.singleClassMode.isChecked() and self.lastLabel:
            # Reuse the previous label without prompting.
            text = self.lastLabel
        else:
            text = self.labelDialog.popUp(text=self.prevLabelText)
            self.lastLabel = text
    else:
        # A non-empty default label is configured: use it directly.
        text = self.defaultLabelTextLine.text()

    # Add Chris
    self.diffcButton.setChecked(False)
    if text is not None:
        self.prevLabelText = text
        generate_color = generateColorByText(text)
        shape = self.canvas.setLastLabel(text, generate_color, generate_color)
        self.addLabel(shape)
        if self.beginner():  # Switch to edit mode.
            self.canvas.setEditingPolygon(True)
            self.actions.create.setEnabled(True)
            self.actions.createPoint.setEnabled(True)
        else:
            self.actions.editMode.setEnabled(True)
        self.setDirty()

        if text not in self.labelHist:
            self.labelHist.append(text)
    else:
        # Dialog cancelled: discard the shape being drawn.
        # self.canvas.undoLastLine()
        self.canvas.resetAllLines()
def scrollRequest(self, delta, orientation):
    """Scroll the bar for *orientation* by a mouse-wheel *delta*.

    *delta* is in Qt wheel units (1/8 degree); one notch is 120 units and
    moves the bar by one singleStep, opposite to the wheel direction.
    """
    scroll_bar = self.scrollBars[orientation]
    step_count = -delta / (8 * 15)  # 120 wheel units per notch
    scroll_bar.setValue(scroll_bar.value() + scroll_bar.singleStep() * step_count)
def setZoom(self, value):
self.actions.fitWidth.setChecked(False)
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.MANUAL_ZOOM
self.zoomWidget.setValue(value)
def addZoom(self, increment=10):
self.setZoom(self.zoomWidget.value() + increment)
def zoomRequest(self, delta):
# get the current scrollbar positions
# calculate the percentages ~ coordinates
h_bar = self.scrollBars[Qt.Horizontal]
v_bar = self.scrollBars[Qt.Vertical]
# get the current maximum, to know the difference after zooming
h_bar_max = h_bar.maximum()
v_bar_max = v_bar.maximum()
# get the cursor position and canvas size
# calculate the desired movement from 0 to 1
# where 0 = move left
# 1 = move right
# up and down analogous
cursor = QCursor()
pos = cursor.pos()
relative_pos = QWidget.mapFromGlobal(self, pos)
cursor_x = relative_pos.x()
cursor_y = relative_pos.y()
w = self.scrollArea.width()
h = self.scrollArea.height()
# the scaling from 0 to 1 has some padding
# you don't have to hit the very leftmost pixel for a maximum-left movement
margin = 0.1
move_x = (cursor_x - margin * w) / (w - 2 * margin * w)
move_y = (cursor_y - margin * h) / (h - 2 * margin * h)
# clamp the values from 0 to 1
move_x = min(max(move_x, 0), 1)
move_y = min(max(move_y, 0), 1)
# zoom in
units = delta / (8 * 15)
scale = 10
self.addZoom(scale * units)
# get the difference in scrollbar values
# this is how far we can move
d_h_bar_max = h_bar.maximum() - h_bar_max
d_v_bar_max = v_bar.maximum() - v_bar_max
# get the new scrollbar values
new_h_bar_value = h_bar.value() + move_x * d_h_bar_max
new_v_bar_value = v_bar.value() + move_y * d_v_bar_max
h_bar.setValue(new_h_bar_value)
v_bar.setValue(new_v_bar_value)
def setFitWindow(self, value=True):
if value:
self.actions.fitWidth.setChecked(False)
self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
self.adjustScale()
def setFitWidth(self, value=True):
if value:
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
self.adjustScale()
def togglePolygons(self, value):
for item, shape in self.itemsToShapes.items():
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
def clearCursor(self, value):
self.tools.clear()
self.cursor.setEditingPolygon()
def loadFile(self, filePath=None):
    """Load the specified file, or the last opened file if None.

    Handles both bundled LabelFiles (image + annotations + colors) and
    plain images; on success the canvas, window title, recent-file list
    and label list are all refreshed. Returns True on success, False
    otherwise.
    """
    self.resetState()
    self.canvas.setEnabled(False)
    if filePath is None:
        filePath = self.settings.get(SETTING_FILENAME)

    # Make sure that filePath is a regular python string, rather than QString
    filePath = ustr(filePath)

    # Fix bug: An index error after select a directory when open a new file.
    unicodeFilePath = ustr(filePath)
    unicodeFilePath = os.path.abspath(unicodeFilePath)
    # Tzutalin 20160906 : Add file list and dock to move faster
    # Highlight the file item
    if unicodeFilePath and self.fileListWidget.count() > 0:
        if unicodeFilePath in self.mImgList:
            index = self.mImgList.index(unicodeFilePath)
            fileWidgetItem = self.fileListWidget.item(index)
            fileWidgetItem.setSelected(True)
        else:
            # The file is outside the current image list: drop stale entries.
            self.fileListWidget.clear()
            self.mImgList.clear()

    if unicodeFilePath and os.path.exists(unicodeFilePath):
        if LabelFile.isLabelFile(unicodeFilePath):
            try:
                self.labelFile = LabelFile(unicodeFilePath)
            except LabelFileError as e:
                self.errorMessage(u'Error opening file',
                                  (u"<p><b>%s</b></p>"
                                   u"<p>Make sure <i>%s</i> is a valid label file.")
                                  % (e, unicodeFilePath))
                self.status("Error reading %s" % unicodeFilePath)
                return False
            # A label file bundles the image bytes, colors and verified flag.
            self.imageData = self.labelFile.imageData
            self.lineColor = QColor(*self.labelFile.lineColor)
            self.fillColor = QColor(*self.labelFile.fillColor)
            self.canvas.verified = self.labelFile.verified
        else:
            # Load image:
            # read data first and store for saving into label file.
            self.imageData = read(unicodeFilePath, None)
            self.labelFile = None
            self.canvas.verified = False

        # read() may already return a QImage; otherwise decode the raw bytes.
        if isinstance(self.imageData, QImage):
            image = self.imageData
        else:
            image = QImage.fromData(self.imageData)
        if image.isNull():
            self.errorMessage(u'Error opening file',
                              u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
            self.status("Error reading %s" % unicodeFilePath)
            return False
        self.status("Loaded %s" % os.path.basename(unicodeFilePath))
        self.image = image
        self.filePath = unicodeFilePath
        self.canvas.loadPixmap(QPixmap.fromImage(image))
        if self.labelFile:
            self.loadLabels(self.labelFile.shapes)
        self.setClean()
        self.canvas.setEnabled(True)
        self.adjustScale(initial=True)
        self.paintCanvas()
        self.addRecentFile(self.filePath)
        self.toggleActions(True)
        # Also load any sidecar annotation (VOC/YOLO/CreateML) for this image.
        self.showBoundingBoxFromAnnotationFile(filePath)
        self.setWindowTitle(__appname__ + ' ' + filePath)

        # Default : select last item if there is at least one item
        if self.labelList.count():
            self.labelList.setCurrentItem(self.labelList.item(self.labelList.count()-1))
            self.labelList.item(self.labelList.count()-1).setSelected(True)

        self.canvas.setFocus(True)
        return True
    return False
def showBoundingBoxFromAnnotationFile(self, filePath):
if self.defaultSaveDir is not None:
basename = os.path.basename(os.path.splitext(filePath)[0])
filedir = filePath.split(basename)[0].split(os.path.sep)[-2:-1][0]
xmlPath = os.path.join(self.defaultSaveDir, basename + XML_EXT)
txtPath = os.path.join(self.defaultSaveDir, basename + TXT_EXT)
jsonPath = os.path.join(self.defaultSaveDir, filedir + JSON_EXT)
"""Annotation file priority:
PascalXML > YOLO
"""
if os.path.isfile(xmlPath):
self.loadPascalXMLByFilename(xmlPath)
elif os.path.isfile(txtPath):
self.loadYOLOTXTByFilename(txtPath)
elif os.path.isfile(jsonPath):
self.loadCreateMLJSONByFilename(jsonPath, filePath)
else:
xmlPath = os.path.splitext(filePath)[0] + XML_EXT
txtPath = os.path.splitext(filePath)[0] + TXT_EXT
if os.path.isfile(xmlPath):
self.loadPascalXMLByFilename(xmlPath)
elif os.path.isfile(txtPath):
self.loadYOLOTXTByFilename(txtPath)
def resizeEvent(self, event):
if self.canvas and not self.image.isNull()\
and self.zoomMode != self.MANUAL_ZOOM:
self.adjustScale()
super(MainWindow, self).resizeEvent(event)
def paintCanvas(self):
assert not self.image.isNull(), "cannot paint null image"
self.canvas.scale = 0.01 * self.zoomWidget.value()
self.canvas.labelFontSize = int(0.02 * max(self.image.width(), self.image.height()))
self.canvas.adjustSize()
self.canvas.update()
def adjustScale(self, initial=False):
value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
self.zoomWidget.setValue(int(100 * value))
def scaleFitWindow(self):
    """Figure out the size of the pixmap in order to fit the main widget.

    Returns the zoom factor that fits the whole pixmap inside the central
    widget (minus a small epsilon so no scrollbars appear).
    """
    epsilon = 2.0  # so that no scrollbars are generated
    avail_w = self.centralWidget().width() - epsilon
    avail_h = self.centralWidget().height() - epsilon
    avail_aspect = avail_w / avail_h
    pix_w = self.canvas.pixmap.width() - 0.0
    pix_h = self.canvas.pixmap.height() - 0.0
    pix_aspect = pix_w / pix_h
    # Fit along whichever axis constrains the pixmap first.
    if pix_aspect >= avail_aspect:
        return avail_w / pix_w
    return avail_h / pix_h
def scaleFitWidth(self):
    """Zoom factor that makes the pixmap exactly as wide as the central widget."""
    # The epsilon does not seem to work too well here.
    available_width = self.centralWidget().width() - 2.0
    return available_width / self.canvas.pixmap.width()
def closeEvent(self, event):
    """Persist window geometry and user preferences on close.

    If there are unsaved changes and the user cancels, the close is
    aborted and no settings are written.
    """
    if not self.mayContinue():
        # BUG FIX: previously execution fell through and saved settings
        # even though the close was cancelled.
        event.ignore()
        return

    settings = self.settings
    # If it loads images from dir, don't load it at the begining
    if self.dirname is None:
        settings[SETTING_FILENAME] = self.filePath if self.filePath else ''
    else:
        settings[SETTING_FILENAME] = ''

    settings[SETTING_WIN_SIZE] = self.size()
    settings[SETTING_WIN_POSE] = self.pos()
    settings[SETTING_WIN_STATE] = self.saveState()
    settings[SETTING_LINE_COLOR] = self.lineColor
    settings[SETTING_FILL_COLOR] = self.fillColor
    settings[SETTING_RECENT_FILES] = self.recentFiles
    settings[SETTING_ADVANCE_MODE] = not self._beginner
    if self.defaultSaveDir and os.path.exists(self.defaultSaveDir):
        settings[SETTING_SAVE_DIR] = ustr(self.defaultSaveDir)
    else:
        settings[SETTING_SAVE_DIR] = ''

    if self.lastOpenDir and os.path.exists(self.lastOpenDir):
        settings[SETTING_LAST_OPEN_DIR] = self.lastOpenDir
    else:
        settings[SETTING_LAST_OPEN_DIR] = ''

    settings[SETTING_AUTO_SAVE] = self.autoSaving.isChecked()
    # BUG FIX: the shuffle flag used to be written to SETTING_AUTO_SAVE as
    # well, clobbering the auto-save preference. Store it under its own
    # key instead. NOTE(review): confirm whether any loader reads this key.
    settings['shuffle'] = self.shuffleMode.isChecked()
    settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
    settings[SETTING_PAINT_LABEL] = self.displayLabelOption.isChecked()
    settings[SETTING_DRAW_SQUARE] = self.drawSquaresOption.isChecked()
    settings[SETTING_LABEL_FILE_FORMAT] = self.labelFileFormat
    settings.save()
def loadRecent(self, filename):
if self.mayContinue():
self.loadFile(filename)
def scanAllImages(self, folderPath):
    """Recursively collect absolute paths of all supported images under *folderPath*.

    The result is naturally sorted (case-insensitive), then shuffled when
    shuffle mode is enabled.
    """
    supported = tuple('.%s' % fmt.data().decode("ascii").lower()
                      for fmt in QImageReader.supportedImageFormats())
    images = []
    for root, dirs, files in os.walk(folderPath):
        for name in files:
            if name.lower().endswith(supported):
                images.append(ustr(os.path.abspath(os.path.join(root, name))))
    natural_sort(images, key=lambda x: x.lower())
    if self.shuffleMode.isChecked():
        random.shuffle(images)
    return images
def changeSavedirDialog(self, _value=False):
if self.defaultSaveDir is not None:
path = ustr(self.defaultSaveDir)
else:
path = '.'
dirpath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Save annotations to the directory' % __appname__, path, QFileDialog.ShowDirsOnly
| QFileDialog.DontResolveSymlinks))
if dirpath is not None and len(dirpath) > 1:
self.defaultSaveDir = dirpath
self.statusBar().showMessage('%s . Annotation will be saved to %s' %
('Change saved folder', self.defaultSaveDir))
self.statusBar().show()
def openAnnotationDialog(self, _value=False):
if self.filePath is None:
self.statusBar().showMessage('Please select image first')
self.statusBar().show()
return
path = os.path.dirname(ustr(self.filePath))\
if self.filePath else '.'
if self.labelFileFormat == LabelFileFormat.PASCAL_VOC:
filters = "Open Annotation XML file (%s)" % ' '.join(['*.xml'])
filename = ustr(QFileDialog.getOpenFileName(self,'%s - Choose a xml file' % __appname__, path, filters))
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadPascalXMLByFilename(filename)
def openDirDialog(self, _value=False, dirpath=None, silent=False):
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
if silent!=True :
targetDirPath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__, defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
else:
targetDirPath = ustr(defaultOpenDirPath)
self.lastOpenDir = targetDirPath
self.importDirImages(targetDirPath)
def importDirImages(self, dirpath):
if not self.mayContinue() or not dirpath:
return
self.lastOpenDir = dirpath
self.dirname = dirpath
self.filePath = None
self.fileListWidget.clear()
self.mImgList = self.scanAllImages(dirpath)
self.openNextImg()
for imgPath in self.mImgList:
item = QListWidgetItem(imgPath)
self.fileListWidget.addItem(item)
def verifyImg(self, _value=False):
# Proceding next image without dialog if having any label
if self.filePath is not None:
try:
self.labelFile.toggleVerify()
except AttributeError:
# If the labelling file does not exist yet, create if and
# re-save it with the verified attribute.
self.saveFile()
if self.labelFile != None:
self.labelFile.toggleVerify()
else:
return
self.canvas.verified = self.labelFile.verified
self.paintCanvas()
self.saveFile()
def openPrevImg(self, _value=False):
# Proceding prev image without dialog if having any label
if self.autoSaving.isChecked():
if self.defaultSaveDir is not None:
if self.dirty is True:
self.saveFile()
else:
self.changeSavedirDialog()
return
if not self.mayContinue():
return
if len(self.mImgList) <= 0:
return
if self.filePath is None:
return
currIndex = self.mImgList.index(self.filePath)
if currIndex - 1 >= 0:
filename = self.mImgList[currIndex - 1]
if filename:
self.loadFile(filename)
if self.autoPointCheckbox.isChecked():
self.createPoint()
elif self.autoBboxCheckbox.isChecked():
self.createShape()
def openNextImg(self, _value=False):
    """Advance to the next image in the list (or the first if none is open).

    With auto-save enabled, the current annotations are saved first; if no
    save directory is configured yet, the user is asked for one and the
    navigation is aborted.
    """
    # Proceding prev image without dialog if having any label
    if self.autoSaving.isChecked():
        if self.defaultSaveDir is not None:
            if self.dirty is True:
                self.saveFile()
        else:
            self.changeSavedirDialog()
            return
    if not self.mayContinue():
        return

    if len(self.mImgList) <= 0:
        return

    filename = None
    if self.filePath is None:
        # Nothing open yet: start from the first image.
        filename = self.mImgList[0]
    else:
        currIndex = self.mImgList.index(self.filePath)
        if currIndex + 1 < len(self.mImgList):
            filename = self.mImgList[currIndex + 1]

    if filename:
        self.loadFile(filename)
        # Optionally pre-start a new point/box for rapid annotation.
        if self.autoPointCheckbox.isChecked():
            self.createPoint()
        elif self.autoBboxCheckbox.isChecked():
            self.createShape()
def openFile(self, _value=False):
if not self.mayContinue():
return
path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadFile(filename)
def saveFile(self, _value=False):
if self.defaultSaveDir is not None and len(ustr(self.defaultSaveDir)):
if self.filePath:
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0]
savedPath = os.path.join(ustr(self.defaultSaveDir), savedFileName)
self._saveFile(savedPath)
else:
imgFileDir = os.path.dirname(self.filePath)
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0]
savedPath = os.path.join(imgFileDir, savedFileName)
self._saveFile(savedPath if self.labelFile
else self.saveFileDialog(removeExt=False))
def saveFileAs(self, _value=False):
assert not self.image.isNull(), "cannot save empty image"
self._saveFile(self.saveFileDialog())
def saveFileDialog(self, removeExt=True):
caption = '%s - Choose File' % __appname__
filters = 'File (*%s)' % LabelFile.suffix
openDialogPath = self.currentPath()
dlg = QFileDialog(self, caption, openDialogPath, filters)
dlg.setDefaultSuffix(LabelFile.suffix[1:])
dlg.setAcceptMode(QFileDialog.AcceptSave)
filenameWithoutExtension = os.path.splitext(self.filePath)[0]
dlg.selectFile(filenameWithoutExtension)
dlg.setOption(QFileDialog.DontUseNativeDialog, False)
if dlg.exec_():
fullFilePath = ustr(dlg.selectedFiles()[0])
if removeExt:
return os.path.splitext(fullFilePath)[0] # Return file path without the extension.
else:
return fullFilePath
return ''
def _saveFile(self, annotationFilePath):
if annotationFilePath and self.saveLabels(annotationFilePath):
self.setClean()
self.statusBar().showMessage('Saved to %s' % annotationFilePath)
self.statusBar().show()
def closeFile(self, _value=False):
if not self.mayContinue():
return
self.resetState()
self.setClean()
self.toggleActions(False)
self.canvas.setEnabled(False)
self.actions.saveAs.setEnabled(False)
def deleteImg(self):
    """Delete the currently shown image from disk, reload the directory and
    jump back to the image that preceded the deleted one.

    No-op when no image is open.
    """
    deletePath = self.filePath
    if deletePath is None:
        return
    deletingIndex = self.mImgList.index(deletePath)
    # BUG FIX: the original recomputed the *same* index here
    # (self.mImgList.index(self.filePath)), so jumpBackIndex could point
    # one past the end of the shortened list after deletion.
    jumpBackIndex = deletingIndex - 1 if deletingIndex >= 1 else 0
    print("deletingIndex " + str(deletingIndex))
    print("deletePath " + deletePath)
    self.openNextImg()
    if os.path.exists(deletePath):
        os.remove(deletePath)
    self.importDirImages(self.lastOpenDir)
    if not self.mImgList:
        # Last image deleted: nothing left to show.
        return
    # Clamp in case the deleted image was the last one.
    jumpBackIndex = min(jumpBackIndex, len(self.mImgList) - 1)
    print("After deleting jump to index " + str(jumpBackIndex))
    filename = self.mImgList[jumpBackIndex]
    if filename:
        self.loadFile(filename)
def resetAll(self):
self.settings.reset()
self.close()
proc = QProcess()
proc.startDetached(os.path.abspath(__file__))
def mayContinue(self):
    """Ask whether it is safe to drop the current annotation state.

    Returns True when there are no unsaved changes, when the user elects
    to discard them ("No"), or after saving them ("Yes"); returns False
    when the user cancels.
    """
    if not self.dirty:
        return True
    choice = self.discardChangesDialog()
    if choice == QMessageBox.Yes:
        self.saveFile()
        return True
    return choice == QMessageBox.No
def discardChangesDialog(self):
yes, no, cancel = QMessageBox.Yes, QMessageBox.No, QMessageBox.Cancel
msg = u'You have unsaved changes, would you like to save them and proceed?\nClick "No" to undo all changes.'
return QMessageBox.warning(self, u'Attention', msg, yes | no | cancel)
def errorMessage(self, title, message):
return QMessageBox.critical(self, title,
'<p><b>%s</b></p>%s' % (title, message))
def currentPath(self):
return os.path.dirname(self.filePath) if self.filePath else '.'
def chooseColor1(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.lineColor = color
Shape.line_color = color
self.canvas.setDrawingColor(color)
self.canvas.update()
self.setDirty()
def deleteSelectedShape(self):
self.remLabel(self.canvas.deleteSelected())
self.setDirty()
if self.noShapes():
for action in self.actions.onShapesPresent:
action.setEnabled(False)
def chshapeLineColor(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.canvas.selectedShape.line_color = color
self.canvas.update()
self.setDirty()
def chshapeFillColor(self):
color = self.colorDialog.getColor(self.fillColor, u'Choose fill color',
default=DEFAULT_FILL_COLOR)
if color:
self.canvas.selectedShape.fill_color = color
self.canvas.update()
self.setDirty()
def copyShape(self):
self.canvas.endMove(copy=True)
self.addLabel(self.canvas.selectedShape)
self.setDirty()
def moveShape(self):
self.canvas.endMove(copy=False)
self.setDirty()
def loadPredefinedClasses(self, predefClassesFile):
    """Append every (stripped) line of *predefClassesFile* to the label history.

    Missing files are silently ignored; a None history is initialised
    from the first line read.
    """
    if not os.path.exists(predefClassesFile):
        return
    with codecs.open(predefClassesFile, 'r', 'utf8') as class_file:
        for raw_line in class_file:
            label = raw_line.strip()
            if self.labelHist is None:
                self.labelHist = [label]
            else:
                self.labelHist.append(label)
def loadPascalXMLByFilename(self, xmlPath):
if self.filePath is None:
return
if os.path.isfile(xmlPath) is False:
return
self.set_format(FORMAT_PASCALVOC)
tVocParseReader = PascalVocReader(xmlPath)
shapes = tVocParseReader.getShapes()
self.loadLabels(shapes)
self.canvas.verified = tVocParseReader.verified
def loadYOLOTXTByFilename(self, txtPath):
if self.filePath is None:
return
if os.path.isfile(txtPath) is False:
return
self.set_format(FORMAT_YOLO)
tYoloParseReader = YoloReader(txtPath, self.image)
shapes = tYoloParseReader.getShapes()
print (shapes)
self.loadLabels(shapes)
self.canvas.verified = tYoloParseReader.verified
def loadCreateMLJSONByFilename(self, jsonPath, filePath):
if self.filePath is None:
return
if os.path.isfile(jsonPath) is False:
return
self.set_format(FORMAT_CREATEML)
crmlParseReader = CreateMLReader(jsonPath, filePath)
shapes = crmlParseReader.get_shapes()
self.loadLabels(shapes)
self.canvas.verified = crmlParseReader.verified
def copyPreviousBoundingBoxes(self):
currIndex = self.mImgList.index(self.filePath)
if currIndex - 1 >= 0:
prevFilePath = self.mImgList[currIndex - 1]
self.showBoundingBoxFromAnnotationFile(prevFilePath)
self.saveFile()
def togglePaintLabelsOption(self):
for shape in self.canvas.shapes:
shape.paintLabel = self.displayLabelOption.isChecked()
def toogleDrawSquare(self):
self.canvas.setDrawingShapeToSquare(self.drawSquaresOption.isChecked())
def inverted(color):
return QColor(*[255 - v for v in color.getRgb()])
def read(filename, default=None):
    """Read an image from *filename* with EXIF auto-rotation applied.

    Returns a QImage on success, or *default* if reading fails.
    """
    try:
        reader = QImageReader(filename)
        reader.setAutoTransform(True)
        return reader.read()
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate instead of being swallowed.
        return default
def get_main_app(argv=[]):
"""
Standard boilerplate Qt application code.
Do everything but app.exec_() -- so that we can test the application in one thread
"""
app = QApplication(argv)
app.setApplicationName(__appname__)
app.setWindowIcon(newIcon("app"))
# Tzutalin 201705+: Accept extra agruments to change predefined class file
argparser = argparse.ArgumentParser()
argparser.add_argument("image_dir", nargs="?")
argparser.add_argument("predefined_classes_file",
default=os.path.join(os.path.dirname(__file__), "data", "predefined_classes.txt"),
nargs="?")
argparser.add_argument("save_dir", nargs="?")
args = argparser.parse_args(argv[1:])
# Usage : labelImg.py image predefClassFile saveDir
win = MainWindow(args.image_dir,
args.predefined_classes_file,
args.save_dir)
win.show()
return app, win
def main():
'''construct main app and run it'''
app, _win = get_main_app(sys.argv)
return app.exec_()
if __name__ == '__main__':
sys.exit(main())
| 40.208876 | 169 | 0.612232 |
be8eb9eb317319d82704523c1bfb6091ae1aa898 | 9,974 | py | Python | hubspot/cms/blogs/blog_posts/configuration.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/cms/blogs/blog_posts/configuration.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/cms/blogs/blog_posts/configuration.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | # coding: utf-8
"""
Blog Post endpoints
\"Use these endpoints for interacting with Blog Posts, Blog Authors, and Blog Tags\" # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
    """NOTE: This class is auto generated by OpenAPI Generator

    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    :param host: Base url
    :param api_key: Dict to store API key(s)
    :param api_key_prefix: Dict to store API prefix (e.g. Bearer)
    :param username: Username for HTTP basic authentication
    :param password: Password for HTTP basic authentication
    """

    def __init__(self, host="https://api.hubapi.com", api_key=None, api_key_prefix=None, username="", password=""):
        """Constructor"""
        self.host = host
        """Default Base url
        """
        self.temp_folder_path = None
        """Temp file folder for downloading files
        """
        # Authentication Settings
        self.api_key = {}
        if api_key:
            self.api_key = api_key
        """dict to store API key(s)
        """
        self.api_key_prefix = {}
        if api_key_prefix:
            self.api_key_prefix = api_key_prefix
        """dict to store API prefix (e.g. Bearer)
        """
        self.refresh_api_key_hook = None
        """function hook to refresh API key if expired
        """
        self.username = username
        """Username for HTTP basic authentication
        """
        self.password = password
        """Password for HTTP basic authentication
        """
        self.access_token = ""
        """access token for OAuth/Bearer
        """
        self.logger = {}
        """Logging Settings
        """
        self.logger["package_logger"] = logging.getLogger("hubspot.cms.blogs.blog_posts")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        self.logger_format = "%(asctime)s %(levelname)s %(message)s"
        """Log format
        """
        self.logger_stream_handler = None
        """Log stream handler
        """
        self.logger_file_handler = None
        """Log file handler
        """
        self.logger_file = None
        """Debug file location
        """
        self.debug = False
        """Debug switch
        """

        self.verify_ssl = True
        """SSL/TLS verification
        Set this to false to skip verifying SSL certificate when calling API
        from https server.
        """
        self.ssl_ca_cert = None
        """Set this to customize the certificate file to verify the peer.
        """
        self.cert_file = None
        """client certificate file
        """
        self.key_file = None
        """client key file
        """
        self.assert_hostname = None
        """Set this to True/False to enable/disable SSL hostname verification.
        """

        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
        """urllib3 connection pool's maximum number of connections saved
        per pool. urllib3 uses 1 connection as default value, but this is
        not the best value when you are making a lot of possibly parallel
        requests to the same host, which is often the case here.
        cpu_count * 5 is used as default value to increase performance.
        """

        self.proxy = None
        """Proxy URL
        """
        self.proxy_headers = None
        """Proxy headers
        """
        self.safe_chars_for_path_param = ""
        """Safe chars for path_param
        """
        self.retries = None
        """Adding retries to override urllib3 default value 3
        """
        # Disable client side validation
        self.client_side_validation = False

    @property
    def logger_file(self):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            # Modernized from six.iteritems: this SDK runs on Python 3.
            for _, logger in self.logger.items():
                logger.addHandler(self.logger_file_handler)

    @property
    def debug(self):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        # Local import keeps the py2 `six.moves.http_client` shim unnecessary.
        import http.client as httplib
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in self.logger.items():
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in self.logger.items():
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier):
        """Gets API key (with prefix if set).

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication, or None when no key
            is configured for *identifier*.
        """
        if self.refresh_api_key_hook is not None:
            self.refresh_api_key_hook(self)

        key = self.api_key.get(identifier)
        if key:
            prefix = self.api_key_prefix.get(identifier)
            if prefix:
                return "%s %s" % (prefix, key)
            else:
                return key

    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(basic_auth=self.username + ":" + self.password).get("authorization")

    def auth_settings(self):
        """Gets Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """
        return {
            "hapikey": {"type": "api_key", "in": "query", "key": "hapikey", "value": self.get_api_key_with_prefix("hapikey")},
            "oauth2_legacy": {"type": "oauth2", "in": "header", "key": "Authorization", "value": "Bearer " + self.access_token},
        }

    def to_debug_report(self):
        """Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n" "OS: {env}\n" "Python Version: {pyversion}\n" "Version of the API: v3\n" "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version)

    def get_host_settings(self):
        """Gets an array of host settings

        :return: An array of host settings
        """
        return [
            {
                "url": "https://api.hubapi.com/",
                "description": "No description provided",
            }
        ]

    def get_host_from_settings(self, index, variables=None):
        """Gets host URL based on the index and variables

        :param index: array index of the host settings
        :param variables: hash of variable and the corresponding value
        :return: URL based on host settings
        :raises ValueError: when *index* is out of range or a variable value
            is not among the allowed enum values.
        """
        # BUG FIX: mutable default argument replaced with None sentinel.
        variables = {} if variables is None else variables
        servers = self.get_host_settings()

        # check array index out of bound
        if index < 0 or index >= len(servers):
            raise ValueError("Invalid index {} when selecting the host settings. Must be less than {}".format(index, len(servers)))  # noqa: E501

        server = servers[index]
        url = server["url"]

        # go through variable and assign a value
        # BUG FIX: the host settings above have no "variables" key, so the
        # original `server["variables"]` raised KeyError for every host.
        for variable_name in server.get("variables", {}):
            if variable_name in variables:
                if variables[variable_name] in server["variables"][variable_name]["enum_values"]:
                    url = url.replace("{" + variable_name + "}", variables[variable_name])
                else:
                    raise ValueError(
                        "The variable `{}` in the host URL has invalid value {}. Must be {}.".format(  # noqa: E501
                            variable_name, variables[variable_name], server["variables"][variable_name]["enum_values"]
                        )
                    )
            else:
                # use default value
                url = url.replace("{" + variable_name + "}", server["variables"][variable_name]["default_value"])

        return url
| 33.35786 | 193 | 0.59234 |
e1665e7cef7bc6d3af887c13359ab6fb9b58d183 | 15,218 | py | Python | src/alchemlyb/parsing/namd.py | BranniganLab/alchemlyb | a8ac1a1472f124feec9b20b1afdbea352e2ac5a4 | [
"BSD-3-Clause"
] | 83 | 2017-01-09T19:29:09.000Z | 2022-03-17T09:35:08.000Z | src/alchemlyb/parsing/namd.py | BranniganLab/alchemlyb | a8ac1a1472f124feec9b20b1afdbea352e2ac5a4 | [
"BSD-3-Clause"
] | 185 | 2016-11-17T18:09:40.000Z | 2022-03-29T00:38:59.000Z | src/alchemlyb/parsing/namd.py | Becksteinlab/alchemlyb | 9153bbd00425bd02dfb11c6193aa5309d4681e4b | [
"BSD-3-Clause"
] | 37 | 2017-08-09T17:30:43.000Z | 2022-01-17T19:49:58.000Z | """Parsers for extracting alchemical data from `NAMD <http://www.ks.uiuc.edu/Research/namd/>`_ output files.
"""
import pandas as pd
import numpy as np
from os.path import basename
from re import split
import logging
from .util import anyopen
from . import _init_attrs
from ..postprocessors.units import R_kJmol, kJ2kcal
# Module-level logger for all NAMD-parsing diagnostics below.
logger = logging.getLogger("alchemlyb.parsers.NAMD")

# Gas constant in kcal/(mol K): R in kJ/(mol K) times the kJ -> kcal
# conversion factor (constant names per ..postprocessors.units).
k_b = R_kJmol * kJ2kcal
def _filename_sort_key(s):
"""Key for natural-sorting filenames, ignoring the path.
This means that unlike with the standard Python sorted() function, "foo9" < "foo10".
"""
return [int(t) if t.isdigit() else t.lower() for t in split(r'(\d+)', basename(s))]
def _get_lambdas(fep_files):
    """Retrieves all lambda values included in the FEP files provided.

    We have to do this in order to tolerate truncated and restarted fepout files.
    The IDWS lambda is not present at the termination of the window, presumably
    for backwards compatibility with ParseFEP and probably other things.

    For a given lambda1, there can be only one lambda2 and at most one lambda_idws.

    Parameters
    ----------
    fep_files: str or list of str
        Path(s) to fepout files to extract data from.

    Returns
    -------
    List of floats, or None if there is more than one lambda_idws for each lambda1.
    """

    # lambda1 -> lambda2 and lambda1 -> lambda_idws mappings; used both for
    # consistency checking and for collecting the full set of lambda values.
    lambda_fwd_map, lambda_bwd_map = {}, {}
    # Set of direction booleans; if it ever holds both True and False, the
    # windows do not move monotonically in one direction and we raise below.
    is_ascending = set()
    # NOTE(review): appended to but never read anywhere in this function.
    endpoint_windows = []

    for fep_file in sorted(fep_files, key=_filename_sort_key):
        with anyopen(fep_file, 'r') as f:
            for line in f:
                # NOTE(review): assumes fepout files contain no empty lines;
                # an empty line would make l[0] raise IndexError -- TODO confirm.
                l = line.strip().split()
                # We might not have a #NEW line so make the best guess
                if l[0] == '#NEW':
                    lambda1, lambda2 = float(l[6]), float(l[8])
                    lambda_idws = float(l[10]) if 'LAMBDA_IDWS' in l else None
                elif l[0] == '#Free':
                    lambda1, lambda2, lambda_idws = float(l[7]), float(l[8]), None
                else:
                    # We only care about lines with lambda values. No need to
                    # do all that other processing below for every line
                    continue # pragma: no cover

                # Keep track of whether the lambda values are increasing or decreasing, so we can return
                # a sorted list of the lambdas in the correct order.
                # If it changes during parsing of this set of fepout files, then we know something is wrong
                # Keep track of endpoints separately since in IDWS runs there must be one of opposite direction
                if 0.0 in (lambda1, lambda2) or 1.0 in (lambda1, lambda2):
                    endpoint_windows.append((lambda1, lambda2))
                else:
                    # If the lambdas are equal then this doesn't represent an ascending window
                    if lambda2 != lambda1:
                        is_ascending.add(lambda2 > lambda1)
                    if lambda_idws is not None and lambda1 != lambda_idws:
                        is_ascending.add(lambda1 > lambda_idws)

                if len(is_ascending) > 1:
                    raise ValueError(f'Lambda values change direction in {fep_file}, relative to the other files: {lambda1} -> {lambda2} (IDWS: {lambda_idws})')

                # Make sure the lambda2 values are consistent
                if lambda1 in lambda_fwd_map and lambda_fwd_map[lambda1] != lambda2:
                    logger.error(f'fwd: lambda1 {lambda1} has lambda2 {lambda_fwd_map[lambda1]} in {fep_file} but it has already been {lambda2}')
                    raise ValueError('More than one lambda2 value for a particular lambda1')

                lambda_fwd_map[lambda1] = lambda2

                # Make sure the lambda_idws values are consistent
                if lambda_idws is not None:
                    if lambda1 in lambda_bwd_map and lambda_bwd_map[lambda1] != lambda_idws:
                        logger.error(f'bwd: lambda1 {lambda1} has lambda_idws {lambda_bwd_map[lambda1]} but it has already been {lambda_idws}')
                        raise ValueError('More than one lambda_idws value for a particular lambda1')
                    lambda_bwd_map[lambda1] = lambda_idws

    # Collapse the direction set to a single bool.
    # NOTE(review): raises StopIteration if no non-endpoint window contributed a
    # direction (is_ascending empty) -- TODO confirm callers guarantee this.
    is_ascending = next(iter(is_ascending))

    # Union of every lambda value seen on either side of any mapping.
    all_lambdas = set()
    all_lambdas.update(lambda_fwd_map.keys())
    all_lambdas.update(lambda_fwd_map.values())
    all_lambdas.update(lambda_bwd_map.keys())
    all_lambdas.update(lambda_bwd_map.values())
    # Sorted in the same direction the windows progress.
    return list(sorted(all_lambdas, reverse=not is_ascending))
@_init_attrs
def extract_u_nk(fep_files, T):
    """Return reduced potentials `u_nk` from NAMD fepout file(s).

    Parameters
    ----------
    fep_file : str or list of str
        Path to fepout file(s) to extract data from. These are sorted by filename,
        not including the path, prior to processing, using natural-sort. This way,
        filenames including numbers without leading zeros are handled intuitively.

        Windows may be split across files, or more than one window may be present
        in a given file. Windows without footer lines (which may be in a different
        file than the respective header lines) will raise an error. This means that
        while windows may have been interrupted and restarted, they must be
        complete. Lambda values are expected to increase or decrease monotonically,
        and match between header and footer of each window.

    T : float
        Temperature in Kelvin at which the simulation was sampled.

    Returns
    -------
    u_nk : DataFrame
        Potential energy for each alchemical state (k) for each frame (n).

    Note
    ----
    If the number of forward and backward samples in a given window are different,
    the extra sample(s) will be discarded. This is typically zero or one sample.


    .. versionchanged:: 0.5.0
        The :mod:`scipy.constants` is used for parsers instead of
        the constants used by the corresponding MD engine.

    .. versionchanged:: 0.6.0
        Support for Interleaved Double-Wide Sampling files added, with various
        robustness checks.

        `fep_files` can now be a list of filenames.
    """
    # Inverse temperature; multiplying kcal/mol energies by beta yields kT units.
    beta = 1/(k_b * T)

    # lists to get times and work values of each window
    win_ts = []
    win_de = []
    win_ts_back = []
    win_de_back = []

    # create dataframe for results
    u_nk = pd.DataFrame(columns=['time','fep-lambda'])

    # boolean flag to parse data after equil time
    parsing = False

    if type(fep_files) is str:
        fep_files = [fep_files]

    # Extract the lambda values only from the fepouts
    all_lambdas = _get_lambdas(fep_files)

    # open and get data from fep file.
    # We sort the list of fep files in case some of them represent restarted windows.
    # The assumption is that they make sense in lexicographic order.
    # We keep track of which lambda window we're in, but since it can span multiple files,
    # only reset these variables here and after the end of each window
    lambda1_at_start, lambda2_at_start, lambda_idws_at_start = None, None, None
    for fep_file in sorted(fep_files, key=_filename_sort_key):
        # Note we have not set parsing=False because we could be continuing one window across
        # more than one fepout file
        with anyopen(fep_file, 'r') as f:
            has_idws = False
            for line in f:
                l = line.strip().split()
                # We don't know if IDWS was enabled just from the #Free line, and we might not have
                # a #NEW line in this file, so we have to check for the existence of FepE_back lines
                # We rely on short-circuit evaluation to avoid the string comparison most of the time
                if has_idws is False and l[0] == 'FepE_back:':
                    has_idws = True

                # New window, get IDWS lambda if any
                # We keep track of lambdas from the #NEW line and if they disagree with the #Free line
                # within the same file, then complain. This can happen if truncated fepout files
                # are presented in the wrong order.
                if l[0] == '#NEW':
                    if parsing:
                        logger.error(f'Window with lambda1: {lambda1_at_start} lambda2: {lambda2_at_start} lambda_idws: {lambda_idws_at_start} appears truncated')
                        logger.error(f'because a new window was encountered in {fep_file} before the previous one finished.')
                        raise ValueError('New window begun after truncated window')

                    lambda1_at_start, lambda2_at_start = float(l[6]), float(l[8])
                    lambda_idws_at_start = float(l[10]) if 'LAMBDA_IDWS' in l else None
                    has_idws = True if lambda_idws_at_start is not None else False

                # this line marks end of window; dump data into dataframe
                if l[0] == '#Free':
                    # extract lambda values for finished window
                    # lambda1 = sampling lambda (row), lambda2 = comparison lambda (col)
                    lambda1 = float(l[7])
                    lambda2 = float(l[8])

                    # If the lambdas are not what we thought they would be, raise an exception to ensure the calculation
                    # fails. This can happen if fepouts where one window spans multiple fepouts are processed out of order
                    # NB: There is no way to tell if lambda_idws changed because it isn't in the '#Free' line that ends a window
                    if lambda1_at_start is not None \
                        and (lambda1, lambda2) != (lambda1_at_start, lambda2_at_start):
                        logger.error(f"Lambdas changed unexpectedly while processing {fep_file}")
                        logger.error(f"l1, l2: {lambda1_at_start}, {lambda2_at_start} changed to {lambda1}, {lambda2}")
                        logger.error(line)
                        raise ValueError("Inconsistent lambda values within the same window")

                    # As we are at the end of a window, convert last window's work and times values to np arrays
                    # (with energy unit kT since they were kcal/mol in the fepouts)
                    win_de_arr = beta * np.asarray(win_de) # dE values
                    win_ts_arr = np.asarray(win_ts) # timesteps

                    # This handles the special case where there are IDWS energies but no lambda_idws value in the
                    # current .fepout file. This can happen when the NAMD firsttimestep is not 0, because NAMD only emits
                    # the '#NEW' line on timestep 0 for some reason. Perhaps the user ran minimize before dynamics,
                    # or this is a restarted run.
                    # We infer lambda_idws_at_start if it wasn't explictly included in this fepout.
                    # If lambdas are in ascending order, choose the one before it all_lambdas, and if descending, choose
                    # the one after. This happens "automatically" because the lambdas were returned already sorted
                    # in the correct direction by _get_lambdas().
                    # The "else" case is handled by the rest of this block, by default.
                    if has_idws and lambda_idws_at_start is None:
                        l1_idx = all_lambdas.index(lambda1)
                        # Test for the highly pathological case where the first window is both incomplete and has IDWS
                        # data but no lambda_idws value.
                        if l1_idx == 0:
                            raise ValueError(f'IDWS data present in first window but lambda_idws not included; no way to infer the correct lambda_idws')
                        lambda_idws_at_start = all_lambdas[l1_idx - 1]
                        logger.warning(f'Warning: {fep_file} has IDWS data but lambda_idws not included.')
                        logger.warning(f' lambda1 = {lambda1}, lambda2 = {lambda2}; inferring lambda_idws to be {lambda_idws_at_start}')

                    if lambda_idws_at_start is not None:
                        # Mimic classic DWS data
                        # Arbitrarily match up fwd and bwd comparison energies on the same times
                        # truncate extra samples from whichever array is longer
                        win_de_back_arr = beta * np.asarray(win_de_back)
                        n = min(len(win_de_back_arr), len(win_de_arr))
                        tempDF = pd.DataFrame({
                            'time': win_ts_arr[:n],
                            'fep-lambda': np.full(n,lambda1),
                            lambda1: 0,
                            lambda2: win_de_arr[:n],
                            lambda_idws_at_start: win_de_back_arr[:n]})
                        # print(f"{fep_file}: IDWS window {lambda1} {lambda2} {lambda_idws_at_start}")
                    else:
                        # print(f"{fep_file}: Forward-only window {lambda1} {lambda2}")
                        # create dataframe of times and work values
                        # this window's data goes in row LAMBDA1 and column LAMBDA2
                        tempDF = pd.DataFrame({
                            'time': win_ts_arr,
                            'fep-lambda': np.full(len(win_de_arr), lambda1),
                            lambda1: 0,
                            lambda2: win_de_arr})

                    # join the new window's df to existing df
                    u_nk = pd.concat([u_nk, tempDF], sort=True)

                    # reset values for next window of fepout file
                    win_de = []
                    win_ts = []
                    win_de_back = []
                    win_ts_back = []
                    parsing = False
                    has_idws = False
                    lambda1_at_start, lambda2_at_start, lambda_idws_at_start = None, None, None

                # append work value from 'dE' column of fepout file
                if parsing:
                    if l[0] == 'FepEnergy:':
                        win_de.append(float(l[6]))
                        win_ts.append(float(l[1]))
                    elif l[0] == 'FepE_back:':
                        # NOTE(review): win_ts_back is collected but never used
                        # below (only reset); the fwd timestamps are reused for
                        # the backward samples in the IDWS DataFrame above.
                        win_de_back.append(float(l[6]))
                        win_ts_back.append(float(l[1]))

                # Turn parsing on after line 'STARTING COLLECTION OF ENSEMBLE AVERAGE'
                if '#STARTING' in l:
                    parsing = True

    if len(win_de) != 0 or len(win_de_back) != 0: # pragma: no cover
        logger.warning('Trailing data without footer line (\"#Free energy...\"). Interrupted run?')
        raise ValueError('Last window is truncated')

    # NOTE(review): lambda2 / win_ts_arr are only bound once at least one
    # '#Free' footer was seen; with zero completed windows this would raise
    # NameError -- TODO confirm inputs always contain a complete window.
    if lambda2 in (0.0, 1.0):
        # this excludes the IDWS case where a dataframe already exists for both endpoints
        # create last dataframe for fep-lambda at last LAMBDA2
        tempDF = pd.DataFrame({
            'time': win_ts_arr,
            'fep-lambda': lambda2})
        u_nk = pd.concat([u_nk, tempDF], sort=True)

    u_nk.set_index(['time','fep-lambda'], inplace=True)

    return u_nk
| 49.249191 | 162 | 0.593508 |
27e98a0f6745f10872e69f21159a2658dfc2d557 | 3,889 | py | Python | contrib/linearize/linearize-hashes.py | ILCOINDevelopmentTeam/ilcoin-master | f6ceb8adcbd5db8d5cb8beeaf937ceb2d76bb3af | [
"MIT"
] | 21 | 2021-01-17T06:44:12.000Z | 2022-03-10T02:11:24.000Z | contrib/linearize/linearize-hashes.py | Borishbc/ilcoin-master | b03cebfb0296379252b991d4622c65d3628f965d | [
"MIT"
] | 2 | 2020-06-22T12:41:52.000Z | 2020-07-15T03:44:41.000Z | contrib/linearize/linearize-hashes.py | ILCoinDevTeam/ilcoin-master | f6ceb8adcbd5db8d5cb8beeaf937ceb2d76bb3af | [
"MIT"
] | 10 | 2019-02-28T09:33:24.000Z | 2020-09-17T11:37:59.000Z | #!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Ilcoin Core developers
# All Rights Reserved. ILCoin Blockchain Project 2019©
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
# Global settings mapping, filled from the key=value config file by the
# __main__ block at the bottom of this script and consumed by get_block_hashes().
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
    """Switch the endianness of a hex string (in pairs of hex chars)."""
    # Chop the string into 2-character byte pairs, reverse their order,
    # and glue them back together (round-tripping through bytes as before).
    reversed_pairs = [s[i:i + 2].encode() for i in range(0, len(s), 2)][::-1]
    return b''.join(reversed_pairs).decode()
class IlcoinRPC:
    """Minimal JSON-RPC 1.1 client for talking to a local ilcoind node."""

    def __init__(self, host, port, username, password):
        """Prepare the Basic-auth header and an HTTP connection (30 s timeout)."""
        credentials = "%s:%s" % (username, password)
        self.authhdr = b"Basic " + base64.b64encode(credentials.encode('utf-8'))
        self.conn = httplib.HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        """POST *obj* as a JSON body and return the decoded JSON reply.

        Returns None (after printing a diagnostic to stderr) when the
        connection is refused or the server sends no response.
        """
        headers = {'Authorization': self.authhdr,
                   'Content-type': 'application/json'}
        try:
            self.conn.request('POST', '/', json.dumps(obj), headers)
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                  file=sys.stderr)
            return None

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        return json.loads(resp.read().decode('utf-8'))

    @staticmethod
    def build_request(idx, method, params):
        """Build one JSON-RPC 1.1 request dict; None params become []."""
        return {'version': '1.1',
                'method': method,
                'id': idx,
                'params': [] if params is None else params}

    @staticmethod
    def response_is_error(resp_obj):
        """Return whether *resp_obj* carries a non-null JSON-RPC error field."""
        return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print the block hash of every height in [min_height, max_height].

    Issues batched `getblockhash` JSON-RPC requests (at most
    `max_blocks_per_call` heights per batch) against the node described by
    `settings`, printing one hash per line in ascending height order.
    Hashes are byte-reversed first when settings['rev_hash_bytes'] == 'true'.
    Returns None; exits the process on a JSON-RPC error reply.
    """
    rpc = IlcoinRPC(settings['host'], settings['port'],
                    settings['rpcuser'], settings['rpcpassword'])

    height = settings['min_height']
    while height < settings['max_height']+1:
        # Cap each JSON-RPC batch at max_blocks_per_call requests.
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))

        reply = rpc.execute(batch)
        if reply is None:
            # Connection-level failure was already reported by execute().
            print('Cannot continue. Program will halt.')
            return None

        for x,resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                exit(1)
            assert(resp_obj['id'] == x) # assume replies are in-sequence
            if settings['rev_hash_bytes'] == 'true':
                resp_obj['result'] = hex_switchEndian(resp_obj['result'])
            print(resp_obj['result'])

        height += num_blocks
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    # Read key=value settings from the config file, skipping comment lines.
    # A context manager guarantees the file is closed even if parsing raises
    # (the original opened and closed the handle manually).
    with open(sys.argv[1]) as f:
        for line in f:
            # skip comment lines (raw strings avoid invalid-escape warnings)
            m = re.search(r'^\s*#', line)
            if m:
                continue

            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)

    # Fill in defaults for any settings the config file did not provide.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        # Bug fix: the original wrote `file=stderr`, a NameError since only the
        # `sys` module is imported; sys.stderr is what was clearly intended.
        print("Missing username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    # Force hash byte format setting to be lowercase to make comparisons easier.
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    get_block_hashes(settings)
26fde19d134ee0834dd59371c8de2140e8eb5bc6 | 415 | py | Python | src/modules/polynomial/LinkActivation.py | ychnlgy/Chebyshev-Lagrange | 74292e72b83f992d6c42a2f2db04dfdce5a52aea | [
"MIT"
] | 1 | 2021-08-19T14:28:45.000Z | 2021-08-19T14:28:45.000Z | src/modules/polynomial/LinkActivation.py | ychnlgy/Chebyshev-Lagrange | 74292e72b83f992d6c42a2f2db04dfdce5a52aea | [
"MIT"
] | null | null | null | src/modules/polynomial/LinkActivation.py | ychnlgy/Chebyshev-Lagrange | 74292e72b83f992d6c42a2f2db04dfdce5a52aea | [
"MIT"
] | 1 | 2022-03-11T07:20:06.000Z | 2022-03-11T07:20:06.000Z | from . import RegActivation
class LinkActivation(RegActivation):
    """RegActivation variant that derives its out-of-range weights from the
    basis gradients at the interval endpoints (x = -1 and x = +1).
    """

    # === PROTECTED ===
    def calc_weight(self, slc, *args):
        """Return a column vector of slopes for one extrapolation side.

        `slc` selects the side: it must be identical to `self.leftslice`
        (inputs < -1) or `self.rightslice` (inputs > 1) -- both presumably
        set by the RegActivation parent; TODO confirm.
        """
        if slc is self.leftslice: # left or <-1
            # gradient of each basis function at x = -1
            w = self.basis.grad_neg1()
        else: # right or >1
            assert slc is self.rightslice
            # gradient of each basis function at x = +1
            w = self.basis.grad_pos1()
        # Scale each basis gradient by the learned coefficients and sum over the
        # basis dimension (dim=2), yielding one slope per output unit.
        # Assumes self.weight broadcasts against shape (1, 1, len(w), 1) and
        # that w is a 1-D tensor -- TODO confirm against RegActivation.
        p = self.weight*w.view(1, 1, len(w), 1)
        return p.sum(dim=2).view(-1, 1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.