id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
4894444 | <reponame>uw-advanced-robotics/aruw-vision-platform-2019
#!/usr/bin/env python
import rospy
import common_utils
from common_utils import watch_for_robot_id, RobotType, register_robot_type_changed_handler
from four_wheel_mecanum_drive import SoldierDrive, HeroDrive, EngineerDrive
from sentinel_drive import SentinelDrive
class OdometryNode:
    '''
    Odometry ROS node.
    Encapsulates the DriveOdometry object that
    tracks and publishes this robot's current odometry state
    '''
    def __init__(self):
        # NOTE: this constructor never returns until ROS shuts down --
        # it runs the 100 Hz publish loop at the bottom.
        rospy.init_node('aruw_odometry')
        common_utils.frameTransformer.init_tf()
        # Factory methods, keyed by robot type, for the matching drive-odometry model.
        self.INIT_ODOMETRY_BY_ROBOT_TYPE = {
            RobotType.HERO: self.init_hero,
            RobotType.ENGINEER: self.init_engineer,
            RobotType.SOLDIER: self.init_soldier,
            RobotType.SENTINEL: self.init_sentinel
        }
        # Nothing is published until the first robot-type callback fires.
        self._active_odom = None
        watch_for_robot_id()
        register_robot_type_changed_handler(self._handle_robot_type_changed)
        # Main loop
        rate = rospy.Rate(100)
        while not rospy.is_shutdown():
            if self._active_odom:
                self._active_odom.publish()
            rate.sleep()
    def _handle_robot_type_changed(self, new_robot_type):
        '''
        Callback function for when Robot ID changes on the referee system.
        Changes the active DriveOdometry object to the one that matches the new robot type
        '''
        rospy.loginfo("New robot type set: {}; updating odometry...".format(new_robot_type))
        self._active_odom = self.INIT_ODOMETRY_BY_ROBOT_TYPE[new_robot_type]()
    def init_hero(self):
        # Factory: drive odometry for the hero robot.
        return HeroDrive()
    def init_engineer(self):
        return EngineerDrive()
    def init_soldier(self):
        return SoldierDrive()
    def init_sentinel(self):
        return SentinelDrive()
# Script entry point -- constructing the node blocks in its publish loop.
if __name__ == '__main__':
    odometry = OdometryNode()
| StarcoderdataPython |
1629965 | import itertools
import json
import random
from abc import ABC
from pathlib import Path
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset as TorchDataset
from typing import Callable, List, Iterable, Tuple
from deepproblog.dataset import Dataset
from deepproblog.query import Query
from problog.logic import Term, list2term, Constant
class ClothGroupHelper(object):
    """Groups FashionMNIST classes into tops / bottoms / shoes and samples
    outfit triples (both valid and invalid) for training data generation."""
    def __init__(self, dataset, size):
        # Human-readable names for the ten FashionMNIST class ids.
        self.labelMap = {0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover', 3: 'Dress', 4: 'Coat', 5: 'Sandal',
                         6: 'Shirt', 7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'}
        # Keep the first *size* class ids of each garment category.
        # NOTE(review): class 8 is 'Bag' but is listed under bottoms -- confirm intended.
        self.tops = [0, 2, 6][:size]
        self.bots = [1, 3, 8][:size]
        self.shoe = [5, 7, 9][:size]
        self.allClothes = self.tops + self.bots + self.shoe
        # Fixed seed keeps the generated training data reproducible.
        random.seed(0)
        # Bucket the dataset's (image, label) pairs by class id.
        self.data = dict()
        for i in range(10):
            self.data[i] = []
        for datapoint in dataset:
            self.data[datapoint[1]].append(datapoint)
    def createTrainingData(self, training_data_size):
        """Generate roughly half correct / half wrong outfit triples.

        Returns (flat list of sampled (image, label) items, shuffled list of
        (i, i+1, i+2) index triples into that flat list).
        """
        trainingData = []
        trainingIndices = []
        # Float halves: with an odd size the loop runs until both drop <= 0,
        # so slightly more than training_data_size examples may be produced.
        corrects, wrongs = training_data_size / 2, training_data_size / 2
        i = 0
        while corrects > 0 or wrongs > 0: # very unsafe, assumes more wrongs than rights
            # Random triples count against whichever bucket they land in;
            # once the 'wrong' quota is used up, force correct combinations.
            newCombo = random.choices(self.allClothes, k=3) if wrongs > 0 else self.getRandomCorrect()
            correct = self.isCorrectCombination(newCombo)
            trainingData.extend([self.getRandomItemFromData(clothId) for clothId in newCombo])
            trainingIndices.append((i, i + 1, i + 2))
            i += 3
            if correct:
                corrects -= 1
            else:
                wrongs -= 1
        trainingIndices = random.sample(trainingIndices, len(trainingIndices)) # shuffle data
        return trainingData, trainingIndices
    def getRandomCorrect(self):
        # One class id from each category, returned in shuffled order.
        return random.sample([random.choice(self.tops), random.choice(self.bots), random.choice(self.shoe)], 3)
    def getCloth(self, kind, index):
        # Direct lookup of the index-th item of class *kind*.
        return self.data[kind][index]
    def getRandomItemFromData(self, classo):
        # Uniformly pick one (image, label) pair of the given class.
        return random.sample(self.data[classo], 1)[0]
    def isCorrectCombination(self, combo):
        # A valid outfit intersects every category (top, bottom, shoe) at least once.
        return all([bool(set(c) & set(combo)) for c in [self.tops, self.bots, self.shoe]])
    def __len__(self):
        # Total number of bucketed datapoints.
        return sum(len(x) for x in self.data.values())
# Raw FashionMNIST splits; download=True fetches the data on first use,
# so importing this module has a network/disk side effect.
transform = transforms.Compose([transforms.ToTensor()])
datasets = {
    "train": torchvision.datasets.FashionMNIST(root='./data/', train=True, download=True, transform=transform),
    "test": torchvision.datasets.FashionMNIST(root='./data/', train=False, download=True, transform=transform)
}
class MNIST_Images(object):
    """Indexable tensor source mapping a term argument to a raw image."""

    def __init__(self, dataset):
        # dataset is a flat sequence of (image, label) pairs.
        self.dataset = dataset

    def __getitem__(self, item):
        # item is a 1-element sequence whose first entry names the image
        # index (e.g. a Problog Constant); coerce it to a plain int.
        position = int(item[0])
        return self.dataset[position][0]
# TODO look at addition_mil
# class MNIST(Dataset):
# def __len__(self):
# return len(self.dataset)
#
# def to_query(self, i):
# l = self.dataset[i][1]
# l = Constant(self.val_list.index(l))
#
# return Query(
# Term("clothes", Term("tensor", Term(self.dataset, Term("a"))), l),
# substitution={Term("a"): Constant(i)},
# )
#
# def __init__(self, dataset):
# self.dataset = dataset
# self.val_list = list(labelMap.values())
def clothesGroup(dataset: str, size, training_size):
    """Return a cloth-group (valid outfit) dataset for the given split.

    (The previous docstring said "binary addition" -- a copy-paste leftover.)

    :param dataset: split name, 'train' or 'test' (a key of ``datasets``)
    :param size: how many class ids per garment category to use
    :param training_size: number of examples to generate
    """
    return MNISTOperator(
        dataset_name=dataset,
        function_name="clothesGroup",
        size=size,
        arity=3,
        training_data_size=training_size
    )
class MNISTOperator(Dataset, TorchDataset):
    """DeepProbLog dataset of FashionMNIST image triples for the
    cloth-group (valid outfit) task."""

    def __getitem__(self, index: int) -> Tuple[list, list, list]:
        """Return the three images of example *index*, each with an added
        leading batch dimension."""
        i1, i2, i3 = self.data_indices[index]
        c1, c2, c3 = self.data[i1], self.data[i2], self.data[i3]
        return c1[0].unsqueeze(0), c2[0].unsqueeze(0), c3[0].unsqueeze(0)

    def __init__(self, dataset_name, size, function_name: str, arity, training_data_size: int = 100):
        """Generic dataset for operator(img, img) style datasets.

        :param dataset_name: key into the module-level ``datasets`` dict ('train'/'test')
        :param function_name: Name of Problog function to query.
        :param size: Size of number of the cloth groups
        :param arity: Number of arguments for the operator
        :param training_data_size: number of examples to generate
        """
        self.dataset_name = dataset_name
        self.function_name = function_name
        self.arity = arity
        self.cloth_group_helper = ClothGroupHelper(datasets[dataset_name], size)
        # createTrainingData returns the flat image list plus index triples;
        # (the previous redundant [] initializations were removed).
        self.data, self.data_indices = self.cloth_group_helper.createTrainingData(training_data_size)

    def get_tensor_source(self):
        """Expose the generated images as the network's tensor source."""
        return MNIST_Images(self.data)

    def to_file_repr(self, i):
        """Old file represenation dump. Not a very clear format as multi-digit arguments are not separated"""
        # Fixed: the original *returned* the exception instance instead of
        # raising it, silently handing callers a NotImplementedError object.
        raise NotImplementedError()

    def to_json(self):
        """
        Convert to JSON, for easy comparisons with other systems.

        Format is [EXAMPLE, ...]
        EXAMPLE :- [ARGS, expected_result]
        ARGS :- [MULTI_DIGIT_NUMBER, ...]
        MULTI_DIGIT_NUMBER :- [mnist_img_id, ...]
        """
        # Fixed: raise instead of returning the exception instance.
        raise NotImplementedError()

    def to_query(self, ind: int) -> Query:
        """Generate the Problog query for example *ind*."""
        # Build substitution dictionary for the arguments
        indices = self.data_indices[ind]
        expected_result = self._get_label(ind)
        subs = dict()
        var_names = []
        for i in range(self.arity):
            inner_vars = []
            t = Term(f"p{i}")
            subs[t] = Term(
                "tensor",
                Term(
                    self.dataset_name,
                    Constant(indices[i]),
                ),
            )
            inner_vars.append(t)
            var_names.append(inner_vars)
        # Build query
        return Query(
            Term(
                self.function_name,
                *(e[0] for e in var_names),
                Constant(expected_result)
            ),
            subs,
        )

    def _get_label(self, i: int):
        """Label example *i*: 1 when its three garments form a valid outfit, else 0."""
        indices = self.data_indices[i]
        clothes = [self.data[x][1] for x in indices]
        return int(self.cloth_group_helper.isCorrectCombination(clothes))

    def __len__(self):
        return len(self.data_indices)
| StarcoderdataPython |
3233573 | <reponame>yash0307jain/competitive-programming
import time
from pprint import pprint
def ratMaze(arr, row, col, ans):
    """Depth-first search for a path from (row, col) to the bottom-right
    corner of a square maze.

    arr: square grid, 1 = open cell; mutated in place (0 = on current path,
         -1 = explored dead end, so cells are never revisited).
    ans: same-shaped grid; cells on the discovered path are marked with 1.
    Returns True when a path exists, False otherwise.
    """
    n = len(arr)
    # Base case: reached the exit cell.
    if row == n - 1 and col == n - 1:
        ans[row][col] = 1
        return True
    # Explore neighbours in the original order: down, right, left, up.
    for dr, dc in ((1, 0), (0, 1), (0, -1), (-1, 0)):
        r, c = row + dr, col + dc
        if 0 <= r < n and 0 <= c < n and arr[r][c] == 1:
            ans[row][col] = 1
            arr[row][col] = 0
            if ratMaze(arr, r, c, ans):
                return True
            # Backtrack: unmark the answer and flag this cell as a dead end.
            ans[row][col] = 0
            arr[row][col] = -1
    return False
# 6x6 maze: 1 = open cell, 0 = wall. Entry at (0, 0), exit at (5, 5).
arr = [[1, 1, 1, 1, 1, 1],
       [1, 0, 0, 0, 0, 1],
       [1, 0, 1, 1, 0, 1],
       [1, 0, 0, 1, 1, 1],
       [1, 1, 1, 1, 0, 1],
       [0, 0, 0, 0, 0, 1]]
# Output grid: cells on the discovered path get marked with 1.
ans = [[0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0]]
ratMaze(arr, 0, 0, ans)
pprint(ans)
3237087 | <gh_stars>1-10
from utils.world_utils import (
SECTOR_BYTES,
SECTOR_INTS,
CHUNK_HEADER_SIZE,
VERSION_GZIP,
VERSION_DEFLATE,
block_coords_to_chunk_coords,
chunk_coords_to_region_coords,
region_coords_to_chunk_coords,
blocks_slice_to_chunk_slice,
gunzip,
from_nibble_array,
)
from utils.misc_utils import Int
| StarcoderdataPython |
import os
import requests
api_token = os.getenv('THEMOVIEDB_API_KEY')
def get_json(path, **params):
    """GET a TMDB v3 endpoint and return the decoded JSON body.

    Prints a message and returns None when the HTTP request fails.
    """
    url = f'https://api.themoviedb.org/3/{path}'
    # NOTE(review): callers pass paths with a leading '/', producing '3//...'
    # in the URL; TMDB appears to tolerate this -- confirm before changing.
    all_params = {'api_key': api_token} | params  # dict union requires Python 3.9+
    try:
        r = requests.get(url, params=all_params)
    except requests.exceptions.RequestException:
        print('Houve algum problema com a conexão.')
        return
    else:
        return r.json()
def get_movie_info(movie_id):
    """Fetch title, year, director, top-10 cast and poster URL for a TMDB movie.

    Returns None when the API request failed or returned no data; the
    'director' field is None when no crew member has job 'Director'.
    """
    data = get_json(f'/movie/{movie_id}', language='pt-BR', append_to_response='credits')
    if not data:
        return
    credits = data['credits']
    # Robustness fix: the original indexed [0] into the filtered crew list,
    # which raised IndexError for movies with no credited director.
    director = next((member['name'] for member in credits['crew']
                     if member['job'] == 'Director'), None)
    return {
        'title': data['title'],
        'original_title': data['original_title'],
        # release_date is 'YYYY-MM-DD'; split yields '' for empty dates.
        'release_year': data['release_date'].split('-')[0],
        'director': director,
        'cast': [member['name'] for member in credits['cast']][:10],
        'poster': f'https://image.tmdb.org/t/p/w154{data["poster_path"]}'
    }
| StarcoderdataPython |
11267751 | from src.Devices.Sensors.BME280 import BME280
from src.Devices.Sensors.BME680 import BME680
from src.Devices.Sensors.CCS811 import CCS811
from src.Devices.Sensors.DS18B20 import DS18B20
from src.Devices.Sensors.LTR559 import LTR559
from src.Devices.Sensors.PMS5003 import PMS5003
class Factory:
    @staticmethod
    def create_sensor(device, address):
        """Instantiate the sensor driver whose name matches *device*.

        Returns None when *device* is not a recognised sensor type.
        """
        # Lambdas defer evaluation so only the selected driver class is
        # ever touched (mirrors the lazy if/elif chain this replaces).
        builders = {
            'BME280': lambda: BME280(address=address),
            'BME680': lambda: BME680(address=address),
            'DS18B20': lambda: DS18B20(address=address),
            'CCS811': lambda: CCS811(address=address),
            'LTR559': lambda: LTR559(address=address),
            'PMS5003': lambda: PMS5003(address=address),
        }
        builder = builders.get(device)
        return builder() if builder is not None else None
| StarcoderdataPython |
9734784 | <gh_stars>0
# 01 - Dada a lista l = [5, 7, 2, 9, 4, 1, 3], escreva um programa
# que imprima as seguintes informações:
#
# a) tamanho da lista.
# b) maior valor da lista.
# c) menor valor da lista.
# d) soma de todos os elementos da lista.
# e) lista em ordem crescente.
# f) lista em ordem decrescente.
# Sample list for the exercise.
l = [5, 7, 2, 9, 4, 1, 3]
# Derived values, computed with the standard library instead of manual loops.
soma = sum(l)
cresc = sorted(l)
desc = sorted(l, reverse=True)
print(f'''
Lista {l}
Tamanho da lista: {len(l)}
Maior valor da lista: {max(l)}
Menor valor da lista: {min(l)}
Soma de todos os elementos da lista: {soma}
Lista em ordem crescente: {cresc}
Lista em ordem decrescente: {desc}
''')
3516867 | <filename>deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/EXT/paletted_texture.py
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_paletted_texture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_paletted_texture',error_checker=_errors._error_checker)
GL_COLOR_INDEX12_EXT=_C('GL_COLOR_INDEX12_EXT',0x80E6)
GL_COLOR_INDEX16_EXT=_C('GL_COLOR_INDEX16_EXT',0x80E7)
GL_COLOR_INDEX1_EXT=_C('GL_COLOR_INDEX1_EXT',0x80E2)
GL_COLOR_INDEX2_EXT=_C('GL_COLOR_INDEX2_EXT',0x80E3)
GL_COLOR_INDEX4_EXT=_C('GL_COLOR_INDEX4_EXT',0x80E4)
GL_COLOR_INDEX8_EXT=_C('GL_COLOR_INDEX8_EXT',0x80E5)
GL_TEXTURE_INDEX_SIZE_EXT=_C('GL_TEXTURE_INDEX_SIZE_EXT',0x80ED)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glColorTableEXT(target,internalFormat,width,format,type,table):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glGetColorTableEXT(target,format,type,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetColorTableParameterfvEXT(target,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetColorTableParameterivEXT(target,pname,params):pass
| StarcoderdataPython |
#!/usr/bin/env python3
"""script for testing connection to database"""
import pyodbc
import sys
import os
from models.user import User
import models
# Read connection settings from the environment.
driver = os.environ.get('CONTACT_SQL_DRIVER')
server = os.environ.get('CONTACT_SQL_SERVER')
database = os.environ.get('CONTACT_SQL_DB')
username = os.environ.get('CONTACT_SQL_USER')
# NOTE(review): this line was redacted in the original source; the env var
# name is assumed to follow the scheme above -- confirm before deploying.
password = os.environ.get('CONTACT_SQL_PWD')
try:
    # WARNING: the table name is interpolated straight from argv (table
    # names cannot be parameterized); only run with trusted input.
    statement = "SELECT * FROM {}".format(sys.argv[1])
except IndexError:  # was a bare except, which also hid unrelated errors
    print("please provide a table as an argument")
    print("usage: ./build_list.py user_table_name")
    exit(1)
needed = [driver, server, database, username, password, statement]
for req in needed:
    if req is None:
        print('Failed to get variable from env settings')
        exit(1)
# build the connection string after verifying attributes were provided
conn_str = 'Driver={};Server={};Database={};Uid={};Pwd={};Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;'.format(
    driver,
    server,
    database,
    username,
    password)
cnxn = pyodbc.connect(conn_str)
cursor = cnxn.cursor()
cursor.execute(statement)
row = cursor.fetchall()
print('got rows')
# Columns 1-4 are assumed to be first/last name, email, phone -- TODO confirm schema.
for r in row:
    u = User(first_name=r[1], last_name=r[2], email=r[3], phone=r[4])
    u.save()
    print(u)
models.storage.save()
| StarcoderdataPython |
8032023 | # Copyright (c) The University of Edinburgh 2014-2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This is a dispel4py graph which produces a workflow that splits the data and
sends it to two nodes (cons1 and cons2) and the output of those two nodes is
merged by another node (last).
.. image:: /images/split_merge.png
It can be executed with MPI and STORM.
* MPI: Please, locate yourself into the dispel4py directory.
Execute the MPI mapping as follows::
mpiexec -n <number mpi_processes> dispel4py mpi <module|module file>\\
[-a name_dispel4py_graph]\\
[-f file containing the input dataset in JSON format]\\
[-i number of iterations/runs']\\
[-s]
The argument '-s' forces to run the graph in a simple processing, which
means that the first node of the graph will be executed in a process, and
the rest of nodes will be executed in a second process.
When [-i number of interations/runs] is not indicated, the graph is
executed once by default.
For example::
mpiexec -n 4 dispel4py mpi dispel4py.examples.graph_testing.split_merge
.. note::
Each node in the graph is executed as a separate MPI process.
This graph has 4 nodes. For this reason we need at least 4 MPI
processes to execute it.
Output::
Processing 1 iteration.
Processes: {'TestProducer0': [1], 'TestOneInOneOutWriter2': [2], \
'TestTwoInOneOut3': [0], 'TestOneInOneOut1': [3]}
TestProducer0 (rank 1): Processed 1 iteration.
TestOneInOneOut1 (rank 3): Processed 1 iteration.
TestOneInOneOutWriter2 (rank 2): Processed 1 iteration.
TestTwoInOneOut3 (rank 0): Processed 2 iterations.
'''
from dispel4py.examples.graph_testing import testing_PEs as t
from dispel4py.workflow_graph import WorkflowGraph
def testSplitMerge():
    '''
    Creates the split/merge graph with 4 nodes.
    :rtype: the created graph
    '''
    graph = WorkflowGraph()
    # One producer fans out over two outputs; a final node merges both streams.
    producer = t.TestProducer(2)
    consumer_a = t.TestOneInOneOut()
    consumer_b = t.TestOneInOneOutWriter()
    merger = t.TestTwoInOneOut()
    graph.connect(producer, 'output0', consumer_a, 'input')
    graph.connect(producer, 'output1', consumer_b, 'input')
    graph.connect(consumer_a, 'output', merger, 'input0')
    graph.connect(consumer_b, 'output', merger, 'input1')
    return graph
''' important: this is the graph_variable '''
graph = testSplitMerge()
| StarcoderdataPython |
3492777 | <gh_stars>0
# !/usr/bin/env python3
# Author: C.K
# Email: <EMAIL>
# DateTime:2021-09-11 10:13:05
# Description:
class Solution:
    def isIsomorphic(self, s: str, t: str) -> bool:
        """Return True when *s* and *t* are isomorphic: each distinct
        character occupies the same set of positions in both strings."""
        def index_groups(text: str):
            # Map each character to the ordered list of its positions.
            positions = {}
            for idx, ch in enumerate(text):
                positions.setdefault(ch, []).append(idx)
            # Sorting makes the comparison independent of character identity.
            return sorted(positions.values())

        return index_groups(s) == index_groups(t)
if __name__ == "__main__":
pass
| StarcoderdataPython |
349469 | <reponame>realandrewyang/let-me-in<gh_stars>0
import pandas as pd
import numpy as np
# States of a course (could be an enum.IntEnum, kept as module ints for
# backward compatibility with existing callers).
NOT_FOUND = -1  # course id absent from the dataframe
OPEN = 0        # free seats remain
FILLED = 1      # enrolment equals capacity
OVERFILLED = 2  # enrolment exceeds capacity
# Checks a course capacity given a course id
# and returns the state and the number of free spots
# Parameters:
# course - pandas dataframe row
# course_id - str
# Output:
# List[enum, int]
def check_course_core(course, course_id):
    """Classify the capacity state of a single-row course selection.

    :param course: one-row pandas DataFrame with 'Enrl Tot' / 'Enrl Cap' columns
    :param course_id: course identifier (kept for interface compatibility; unused)
    :return: List[state, free_spots] where free_spots = capacity - enrolment
    """
    # Robustness fix: an empty selection (unknown id) used to crash the
    # legacy Series.bool() call with ValueError; report NOT_FOUND instead.
    if len(course) == 0:
        return [NOT_FOUND, 0]
    # Extract plain ints up front; Series.bool() and int(Series) are
    # deprecated/removed in modern pandas.
    enrl_tot = int(course["Enrl Tot"].iloc[0])
    enrl_cap = int(course["Enrl Cap"].iloc[0])
    if enrl_tot < enrl_cap:
        state = OPEN
    elif enrl_tot > enrl_cap:
        state = OVERFILLED
    else:
        state = FILLED
    return [state, enrl_cap - enrl_tot]
# Checks a course capacity given a course id
# and returns the state and the number of free spots
# Parameters:
# d - pandas dataframe
# course_id - str
# Output:
# List[enum, int]
def check_course(d, course_id):
    """Look up *course_id* in dataframe *d* and classify its capacity.

    :param d: pandas DataFrame with 'Class', 'Enrl Tot' and 'Enrl Cap' columns
    :param course_id: course identifier (str or int)
    :return: List[state, free_spots]; state is NOT_FOUND when lookup fails
    """
    try:
        row = d.loc[d["Class"] == str(course_id)]
        return check_course_core(row, np.int64(course_id))
    except (KeyError, IndexError, ValueError):
        # Fix: only KeyError was caught before, but an empty selection made
        # the legacy Series.bool() raise ValueError (and positional access
        # raises IndexError), so the NOT_FOUND path was unreachable.
        return [NOT_FOUND, 0]
# Produces a message given a course result and name
# Parameters:
# result - List[enum, int]
# course_name = str
# Output:
# str
def create_message(result, course_name):
    """Render a human-readable capacity message for one course.

    :param result: List[state, free_spots] as produced by check_course
    :param course_name: display name prefixed to the message
    """
    state, free_spots = result[0], result[1]
    if state == NOT_FOUND:
        detail = "not found"
    elif state == FILLED:
        detail = "filled"
    elif state == OVERFILLED:
        detail = str(free_spots * -1) + " spot(s) overfilled"
    else:
        detail = str(free_spots) + " spot(s) open"
    return course_name + ": " + detail
| StarcoderdataPython |
6537235 | <filename>tests/test_te_python.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `te_python` module."""
import pytest
import requests
from te_python import te_python
def test_te_python_initialization():
    # Smoke test: a lookup by SHA-256 hash should decode to a dict.
    # NOTE(review): this hits the live API and will fail offline.
    response = te_python.email_get_details('a53b7747d6bd3f59076d63469d92924e00f407ff472e5a539936453185ecca6c')
    assert isinstance(response, dict)
def test_update_api_url():
    # make a request to localhost (which should fail)... this makes sure that the base_api_url is being properly used
    # NOTE(review): nothing visible here actually repoints the base URL to
    # localhost before the call -- confirm a fixture/monkeypatch is expected.
    with pytest.raises(requests.ConnectionError):
        te_python.email_get_details('a53b7747d6bd3f59076d63469d92924e00f407ff472e5a539936453185ecca6c')
| StarcoderdataPython |
6585074 | <filename>examples/benchmark.py<gh_stars>0
# coding:utf-8
import glob
import os
import shutil
import timeit
from statistics import mean
from lxml.html import fromstring
from selectolax.parser import HTMLParser
# Load every cached SERP fixture page once, up front, so parsing dominates the timings.
pages = glob.glob('examples/pages/*.html')
html_pages = [open(x, encoding='utf-8', errors='ignore').read() for x in pages]
# Equivalent CSS / XPath selectors for the result-URL <cite> element.
selector_css = "cite.iUh30"
selector_xpath = '//cite[contains(@class, "iUh30")]'
def modest_parser(html_pages, selector):
    """Collect the text of every node matching the CSS *selector* using
    selectolax (Modest engine) across all pages."""
    collected = []
    for document in html_pages:
        texts = [node.text(deep=False) for node in HTMLParser(document).css(selector)]
        # Each fixture page is expected to expose at least six results.
        assert len(texts) >= 6
        collected.extend(texts)
    return collected
def lxml_parser(html_pages, selector):
    """Collect the text of every element matching the XPath *selector*
    using lxml across all pages."""
    collected = []
    for document in html_pages:
        tree = fromstring(document)
        texts = [element.text for element in tree.xpath(selector)]
        # Each fixture page is expected to expose at least six results.
        assert len(texts) >= 6
        collected.extend(texts)
    return collected
print('modest', mean(timeit.repeat('modest_parser(html_pages, selector_css)', globals=globals(), repeat=10, number=1)))
print('lxml', mean(timeit.repeat('lxml_parser(html_pages, selector_xpath)', globals=globals(), repeat=10, number=1)))
| StarcoderdataPython |
"""Create done and validated columns
Revision ID: 1a29f9f5c21c
Revises: None
Create Date: 2014-04-18 17:30:32.450777
"""
# revision identifiers, used by Alembic.
revision = '1a29f9f5c21c'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add nullable integer ``done``/``validated`` columns to ``project``,
    backfill them with 0, then make them NOT NULL."""
    for column_name in ('done', 'validated'):
        op.add_column('project', sa.Column(column_name, sa.Integer(), nullable=True))
    project = sa.sql.table('project', sa.sql.column('done'),
                           sa.sql.column('validated'))
    op.execute(project.update().values(done=0))
    op.execute(project.update().values(validated=0))
    for column_name in ('done', 'validated'):
        op.alter_column('project', column_name, nullable=False)
def downgrade():
    """Drop the columns added by :func:`upgrade`, in reverse order."""
    for column_name in ('validated', 'done'):
        op.drop_column('project', column_name)
| StarcoderdataPython |
1885862 | <gh_stars>0
from __future__ import annotations
from typing import Any, Dict, Optional, Protocol
__all__ = ("Snowflake", "Object", "to_snowflake")
class Snowflake(Protocol):
    """
    A class that represents a Snowflake.

    Structural protocol: any object exposing an integer ``id`` attribute
    conforms (see :class:`typing.Protocol`).

    Attributes:
        id (int): The Snowflake ID.
    """

    id: int
class Object(Snowflake):
    """
    A class that represents an object: a bare stand-in carrying only a
    Snowflake ``id``.

    Attributes:
        id (int): The Snowflake ID.
    """

    def __init__(self, id: int) -> None:
        # The id is the only state this placeholder carries.
        self.id = id

    def __repr__(self) -> str:
        return f"<Object id={self.id}>"
def to_snowflake(data: Dict[str, Any], key: str) -> Optional[int]:
    """Read *key* from *data* and coerce its value to an int.

    Returns None when the key is missing or its value is falsy
    (None, empty string, 0, ...).
    """
    raw = data.get(key)
    return int(raw) if raw else None
| StarcoderdataPython |
6467611 | <reponame>unicef/rapidpro-webhooks<filename>rapidpro_webhooks/apps/eum/supply_shipments.py
import datetime
import json
import random
from flask import abort, Blueprint, g, request
import couchdbkit
from rapidpro_webhooks.apps.core.decorators import limit
from rapidpro_webhooks.apps.core.helpers import create_response, rule_link, slugify
eum_bp = Blueprint('eum', __name__)
demo_commodities = ['bednets', 'ors', 'plumpynut', 'textbooks']
demo_vendors = ['Acme', 'Parner Org.', 'Local NGO']
def _format_phone(phone):
    """Normalise an E164 msisdn for use as a CouchDB document-key suffix.

    NOTE(review): the original docstring claimed the leading '+' is
    stripped, but the function returns the input unchanged, so document
    keys keep the '+'. Confirm existing data before 'fixing' this.
    """
    assert phone is not None  # caution: asserts are stripped under python -O
    # TODO rapidpro will send E164, but this should ensure E164 so
    # other callers can be easily supported
    return phone
def _generate_shipment(phone=None):
    """Fabricate a random demo shipment record for the given phone number."""
    assert phone is not None
    # Keep the order of the random draws stable -- it is part of the
    # module's reproducible demo behaviour.
    amount = random.randrange(100, 20000)
    commodity = random.choice(demo_commodities)
    vendor = random.choice(demo_vendors)
    days_out = random.randrange(3, 180)
    expected = (datetime.datetime.utcnow().date() + datetime.timedelta(days=days_out)).isoformat()
    return {
        'amount': amount,
        'commodity': commodity,
        'vendor': vendor,
        'expected': expected
    }
def _update_shipment_status(request, labels):
    """Append a status record (flow values matching *labels*) to the
    contact's shipments-status list in CouchDB.

    :param request: incoming Flask request carrying a RapidPro webhook payload
    :param labels: upper-case flow-value labels to copy into the record
    :return: the stored record dict, or None when phone/payload is missing
    """
    if request.json is not None:
        data = request.json
    else:
        # Form-encoded payload: 'values' and 'steps' arrive JSON-encoded strings.
        data = request.values.to_dict()
        data.update({k: json.loads(v) for k, v in data.items()
                     if k in ['values', 'steps']})
    if data:
        phone = _format_phone(data.get('phone'))
        values = data.get('values')
        if phone:
            # NOTE(review): raises couchdbkit.ResourceNotFound when the contact
            # has no shipments doc yet -- confirm callers guarantee one exists.
            shipments_doc = g.db.open_doc('shipments-%s' % phone)
            shipments_status = shipments_doc.get('shipments-status', [])
            shipment_data = {}
            for value in values:
                if value['label'].upper() in labels:
                    shipment_data.update({slugify(value['label']):
                                          value['value']})
            # Keep the raw webhook payload alongside for auditing/debugging.
            shipment_data.update({'webhook_data': data})
            shipments_status.append(shipment_data)
            shipments_doc.update({'shipments-status': shipments_status})
            g.db.save_doc(shipments_doc)
            return shipment_data
def get_or_create_shipments_doc(phone=None):
    """Fetch the contact's shipments document, creating it (with one demo
    shipment) if absent, and top it up with a new shipment once every
    previous one has a status entry."""
    assert phone is not None
    try:
        shipments_doc = g.db.open_doc('shipments-%s' % phone)
        shipments = shipments_doc.get('shipments', [])
        shipments_status = shipments_doc.get('shipments-status', [])
        # TODO actually check for an outstanding shipment
        if ((shipments_status is None) or (shipments is None))\
                or (len(shipments) == len(shipments_status)):
            shipments.append(_generate_shipment(phone))
            shipments_doc.update({'shipments': shipments})
            g.db.save_doc(shipments_doc)
    except couchdbkit.ResourceNotFound:
        # First contact from this phone: create the document from scratch.
        new_shipments_doc = {'_id': 'shipments-%s' % phone,
                             'shipments': [_generate_shipment(phone)]}
        shipments_doc = g.db.save_doc(new_shipments_doc)
    return shipments_doc
@eum_bp.route('/shipments', methods=['POST'])
@limit(max_requests=10, period=60, by="ip")
def expected_shipments_for_contact():
    """Webhook: return the latest expected shipment for the calling contact.

    Responds 400 when the payload lacks a phone number or no shipments exist.
    """
    if request.json is not None:
        data = request.json
    else:
        data = request.values
    if data:
        phone = _format_phone(data.get('phone'))
        if phone:
            # Ensure the doc exists, then re-open it to read the fresh state.
            shipments_doc = get_or_create_shipments_doc(phone)
            shipments_doc = g.db.open_doc('shipments-%s' % phone)
            shipments = shipments_doc.get('shipments')
            if shipments:
                # NOTE(review): pop() only mutates the in-memory list; the
                # popped shipment is NOT removed from the stored document --
                # confirm this is the intended (non-destructive) behaviour.
                return create_response({'shipment': shipments.pop(),
                                        '_links': {'self': rule_link(request.url_rule)}})
    abort(400)
# Flow-value labels recorded when a delivery is confirmed.
SHIPMENT_RECEIVED_FIELDS = ['RECEIPT OF COMMODITY', 'DATE RECEIVED',
                            'AMOUNT RECEIVED', 'SHIPMENT CONDITION']


@eum_bp.route('/shipment-received', methods=['POST'])
@limit(max_requests=10, period=60, by="ip")
def shipment_received():
    """ Called when shipment has been received by end user """
    shipment = _update_shipment_status(request, SHIPMENT_RECEIVED_FIELDS)
    # An empty record means the payload was missing a phone number.
    if not shipment:
        abort(400)
    return create_response({'shipment': shipment,
                            '_links': {'self':
                                       rule_link(request.url_rule)}})
# Flow-value labels recorded when a shipment status is updated mid-transit.
SHIPMENT_UPDATE_FIELDS = ['RECEIPT OF COMMODITY', 'INFORMED OF DELAY',
                          'REVISED DATE ESTIMATE']


@eum_bp.route('/update-shipment', methods=['POST'])
@limit(max_requests=10, period=60, by="ip")
def update_shipment():
    """ Called when shipment status is updated by end user """
    shipment = _update_shipment_status(request, SHIPMENT_UPDATE_FIELDS)
    # TODO if we have a revised date estimate,
    # then schedule flow for after revised date
    if not shipment:
        abort(400)
    return create_response({'shipment': shipment,
                            '_links': {'self':
                                       rule_link(request.url_rule)}})
@eum_bp.route('/', methods=['GET'])
def home():
return create_response({'app': 'Supply Shipments'})
| StarcoderdataPython |
3596504 | # Copyright (c) 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from launch import LaunchDescription
from ament_index_python.packages import get_package_share_directory
from launch.actions import DeclareLaunchArgument
from launch_ros.actions import Node
from launch.substitutions import LaunchConfiguration
def generate_launch_description():
    """Build the launch description that starts the costmap calculator node.

    The grid-map visualization pieces below are currently disabled.
    """
    grid_map_demos_dir = get_package_share_directory('robotx_costmap_calculator')
    #visualization_config_file = LaunchConfiguration('visualization_config')
    pointcloud_to_gridmap_node = Node(
        package='robotx_costmap_calculator',
        executable='costmap_calculator_node',
        name='costmap_calculator_node',
        output='screen')
    # NOTE(review): the triple-quoted block below is disabled code kept as a
    # no-op string literal; grid_map_demos_dir is only referenced inside it.
    """
    declare_visualization_config_file_cmd = DeclareLaunchArgument(
        'visualization_config',
        default_value=os.path.join(
            grid_map_demos_dir, 'config', 'pc_to_grid.yaml'),
        description='Full path to the Gridmap visualization config file to use')
    grid_map_visualization_node = Node(
        package='grid_map_visualization',
        executable='grid_map_visualization',
        name='grid_map_visualization',
        output='screen',
        parameters=[visualization_config_file]
    )
    """
    # Create the launch description and populate
    ld = LaunchDescription()
    # Add launch arguments to the launch description
    #ld.add_action(declare_visualization_config_file_cmd)
    ld.add_action(pointcloud_to_gridmap_node)
    # Add node actions to the launch description
    #ld.add_action(grid_map_visualization_node)
    return ld
11390245 | #!/usr/bin/env python3
#
# __init__.py
"""
Extensions to :mod:`sphinx.ext.autodoc`.
.. versionadded:: 0.6.0
"""
#
# Copyright © 2020-2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# Parts based on https://github.com/sphinx-doc/sphinx
# | Copyright (c) 2007-2020 by the Sphinx team (see AUTHORS file).
# | BSD Licensed
# | All rights reserved.
# |
# | Redistribution and use in source and binary forms, with or without
# | modification, are permitted provided that the following conditions are
# | met:
# |
# | * Redistributions of source code must retain the above copyright
# | notice, this list of conditions and the following disclaimer.
# |
# | * Redistributions in binary form must reproduce the above copyright
# | notice, this list of conditions and the following disclaimer in the
# | documentation and/or other materials provided with the distribution.
# |
# | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# stdlib
from typing import TYPE_CHECKING, Any, List, Tuple
# 3rd party
from sphinx.application import Sphinx
# this package
from sphinx_toolbox.utils import SphinxExtMetadata, metadata_add_version
if TYPE_CHECKING:
    # 3rd party
    from sphinx.ext.autodoc import ObjectMembers
    # Re-bind so the imported name is treated as a module-level export.
    ObjectMembers = ObjectMembers
else:
    # At runtime, fall back to the plain structural type.
    ObjectMembers = List[Tuple[str, Any]]
__all__ = ["setup"]
@metadata_add_version
def setup(app: Sphinx) -> SphinxExtMetadata:
"""
Setup :mod:`sphinx_toolbox.more_autodoc`.
:param app: The Sphinx application.
"""
# Setup sub-extensions
app.setup_extension("sphinx_toolbox.more_autodoc.augment_defaults")
app.setup_extension("sphinx_toolbox.more_autodoc.autoprotocol")
app.setup_extension("sphinx_toolbox.more_autodoc.autotypeddict")
app.setup_extension("sphinx_toolbox.more_autodoc.autonamedtuple")
app.setup_extension("sphinx_toolbox.more_autodoc.genericalias")
app.setup_extension("sphinx_toolbox.more_autodoc.typehints")
app.setup_extension("sphinx_toolbox.more_autodoc.variables")
app.setup_extension("sphinx_toolbox.more_autodoc.sourcelink")
app.setup_extension("sphinx_toolbox.more_autodoc.no_docstring")
app.setup_extension("sphinx_toolbox.more_autodoc.regex")
app.setup_extension("sphinx_toolbox.more_autodoc.typevars")
app.setup_extension("sphinx_toolbox.more_autodoc.overloads")
app.setup_extension("sphinx_toolbox.more_autodoc.generic_bases")
return {"parallel_read_safe": True}
| StarcoderdataPython |
1718947 | <reponame>bohblue2/lobpy
"""
Copyright (c) 2018, University of Oxford, Rama Cont and ETH Zurich, <NAME>
calibration.py
Contains objects and functions for calibration of the LOB models.
To calibrate dynamics to a given time series of data on bid and ask side, data_bid and data_ask, with time stamps time_stamps which is uniform with time increment time_incr, e. g. use
>>> cal = OrderVolumeCalibrator()
>>> cal.calibrate(time_stamps, time_stamps[1] - time_stamps[0], data_bid, data_ask)
"""
######
# Imports
######
import copy
from collections import defaultdict
import csv
import json
import math
import warnings
import numpy as np
import scipy.optimize as sopt
import pandas as pd
from lobpy.models.loblineartools import LOBProfile
from lobpy.models.loblineartools import LOBFactorProcess
from lobpy.models.loblinear import LOBLinear
from lobpy.models.loblinear import LOBLinearTwoFactor
from lobpy.models.loblinear import OrderVolumeMeanRev
from lobpy.models import estimators as est
CORRELATION_PAR_STR = "Correlation"


class ParameterHistory():
    """ Log of parameters for linear SPDE market models
    -------
    time_stamps: list of time stamps
    params_bid: defaultdict with lists of parameters of bid side
    params_ask: defaultdict with lists of parameters of ask side
    params_correlation: list with correlation values
    """

    def __init__(self):
        self.time_stamps = []
        self.params_bid = defaultdict(list)
        self.params_ask = defaultdict(list)
        self.params_correlation = []

    def __str__(self):
        return str(self.to_list())

    def _check_len(self):
        """ Checks if each parameter list in the history object has the same length
        as the list of time stamps. Returns True if consistent, False otherwise. """
        num_t = len(self.time_stamps)
        # BUGFIX: zip over the two item views yields pairs of (key, list)
        # 2-tuples; the original code tried to unpack 4 names from each pair,
        # which raised ValueError on any non-empty history.
        for (key_bid, list_bid), (key_ask, list_ask) in zip(self.params_bid.items(), self.params_ask.items()):
            if (num_t != len(list_bid)):
                # BUGFIX: warn with the bid key here, not the ask key
                warnings.warn("Size of time stamps is not consistent with lengths of {}".format(key_bid))
                return False
            if (num_t != len(list_ask)):
                warnings.warn("Size of time stamps is not consistent with lengths of {}".format(key_ask))
                return False
        return True

    def to_list(self, paramlist=None):
        """
        Returns parameters in form of a list of lists.
        ----------
        args:
            paramlist=None    list of form ['outparamkey1', 'outparamkey2'] selecting
                              and ordering the parameters in the output
        Output:
            [['Time', timepoint1, timepoint2,...], [param1_bid, param1_bid[timepoint1],...],...]
            if paramlist is not None, then
            [['Time', timepoint1, timepoint2,...], ['outparamkey1_bid', outparamvalue1[timepoint1],...],..., ['Correlation', ...]]
        """
        outvals = [["Time"]]
        outvals[0] = outvals[0] + self.time_stamps
        if paramlist is None:
            for key, item in self.params_bid.items():
                outvals.append([key+"_bid"] + item)
                outvals.append([key+"_ask"] + self.params_ask[key])
        else:
            for key in paramlist:
                outvals.append([key+"_bid"] + self.params_bid[key])
                outvals.append([key+"_ask"] + self.params_ask[key])
        # Correlation is always the last row
        outvals.append([CORRELATION_PAR_STR] + self.params_correlation)
        return outvals

    def to_dict(self, paramlist=None):
        """
        Returns parameters in form of a dict of lists.
        ----------
        args:
            paramlist=None    list of form ['outparamkey1', 'outparamkey2'] with keys
                              which should be considered. Time and Correlation are
                              always included.
        """
        out = dict()
        out['Time'] = self.time_stamps
        if paramlist is None:
            for key, item in self.params_bid.items():
                out[key+"_bid"] = item
                out[key+"_ask"] = self.params_ask[key]
        else:
            for key in paramlist:
                out[key+"_bid"] = self.params_bid[key]
                out[key+"_ask"] = self.params_ask[key]
        out[CORRELATION_PAR_STR] = self.params_correlation
        return out

    def get_modelpardict_t(self, time_index, modelid="model_from_calhist"):
        """
        Returns the model parameters as a dict given a time index time_index into
        the history. Values are (bid, ask) tuples; 'rho' and 'modelid' are scalar.
        """
        out = {key: (self.params_bid[key][time_index], self.params_ask[key][time_index]) for key in self.params_bid.keys()}
        out['rho'] = self.params_correlation[time_index]
        out['modelid'] = modelid + "_tind_" + str(time_index)
        return out

    def append(self, time_stamp, two_factor_model):
        """ Adds the parameters of a two factor model with given time stamp to the
        history. The model must provide get_modelpar_dict() returning (bid, ask)
        tuples per parameter plus scalar 'rho', 'modelid' and 'pmax' entries. """
        self.time_stamps.append(float(time_stamp))
        param_dict = two_factor_model.get_modelpar_dict()
        for key, val in param_dict.items():
            if key in ('modelid', 'pmax'):
                # Not time-dependent model parameters; skip
                continue
            if (key == 'rho'):
                self.params_correlation.append(val)
                continue
            self.params_bid[key].append(val[0])
            self.params_ask[key].append(val[1])
        return True

    def savef(self, filename, csv=True, paramlist=None):
        """ Saves the model history to a file in format:
        a) csv format if csv==True using pandas (".csv" is appended to filename) or
        b) json format, else.
        The columns are [time, bid parameters, ask parameters, correlation].
        paramlist can be optionally given to fix the order and selection of
        parameters. See also to_list.
        Returns True on success, False if serialization failed.
        """
        outvals = self.to_list(paramlist=paramlist)
        if csv:
            data_frame = pd.DataFrame(outvals)
            data_frame = data_frame.transpose()
            data_frame.to_csv(filename+'.csv', index=False, header=False)
        else:
            with open(filename, "w") as outfile:
                try:
                    # BUGFIX: json.dump (not json.dumps) writes to a file object
                    json.dump(outvals, outfile)
                except TypeError:
                    print("Unable to serialize parameter history")
                    return False
        return True

    def loadf_json(self, filename):
        """ Loads the model history from a json file previously written by
        savef(..., csv=False). Warning: This method overwrites the history object.
        """
        # BUGFIX: the original referenced the nonexistent attribute self.time and
        # parsed a structure inconsistent with what savef actually writes
        # (a list of rows, each [label, value1, value2, ...]).
        if (len(self.time_stamps) > 0):
            warnings.warn("Loading history from file {} overwrites history object".format(filename))
        with open(filename, "r") as infile:
            invals = json.load(infile)
        self.time_stamps = []
        self.params_bid = defaultdict(list)
        self.params_ask = defaultdict(list)
        self.params_correlation = []
        for row in invals:
            label = row[0]
            values = row[1:]
            if label == "Time":
                self.time_stamps = values
            elif label == CORRELATION_PAR_STR:
                self.params_correlation = values
            elif label.endswith("_bid"):
                self.params_bid[label[:-4]] = values
            elif label.endswith("_ask"):
                self.params_ask[label[:-4]] = values
            else:
                warnings.warn("Unknown row found in file: {}. Values will be ignored.".format(label), RuntimeWarning)
        return True
####
def read_calhistory_csv(filename):
    """
    Loads the model history from a csv file previously written by
    CalibrationHistory.savef and returns a CalibrationHistory object.

    Columns named 'Time' and 'Correlation' are read directly; every other
    column is expected to be named '<key>_bid' or '<key>_ask'. Unrecognized
    columns are skipped with a RuntimeWarning.
    """
    out = CalibrationHistory()
    invals = pd.read_csv(filename)
    out.time_stamps = invals["Time"].tolist()
    out.params_correlation = invals[CORRELATION_PAR_STR].tolist()
    for key_baflag in invals.columns:
        if key_baflag in ("Time", CORRELATION_PAR_STR):
            # these values are read in already
            continue
        elif "_" not in key_baflag:
            warnings.warn("Unkown column found in file: {}. Values will be ignored.".format(key_baflag), RuntimeWarning)
            continue
        try:
            # BUGFIX: split on the *last* underscore so parameter keys that
            # themselves contain underscores are handled correctly
            key, baflag = key_baflag.rsplit("_", 1)
        except ValueError:
            warnings.warn("Unkown column found in file: {}. Expected: key_bid or key_ask. Values will be ignored.".format(key_baflag), RuntimeWarning)
            continue
        if baflag == "bid":
            out.params_bid[key] = invals[key_baflag].tolist()
        elif baflag == "ask":
            out.params_ask[key] = invals[key_baflag].tolist()
        else:
            # BUGFIX: format fields were {1}/{2} with only two arguments,
            # raising IndexError whenever this warning fired
            warnings.warn("Unkown column found in file: {0}. {1} should be bid or ask. Values will be ignored.".format(key_baflag, baflag), RuntimeWarning)
    return out
class CalibrationHistory(ParameterHistory):
    """ Parameter log recorded during model calibration; alias of ParameterHistory. """
    pass;
class PredictionHistory(ParameterHistory):
    """ Parameter log recorded during model prediction; alias of ParameterHistory. """
    pass;
class ModelCalibrator():
    """ Base class for calibrating linear order book models while keeping a
    record of the calibrated parameters in a CalibrationHistory. """

    def __init__(self):
        self.calibratorid = "ModelCalibrator"
        self.model = LOBLinear()
        self.history = CalibrationHistory()

    def set_id(self, calibratorid):
        """ Set the identifier string of this calibrator. """
        self.calibratorid = calibratorid

    def get_id(self):
        """ Return the identifier string of this calibrator. """
        return self.calibratorid

    def savef_history(self, csv=True):
        """ Save the calibration history to a file named after the calibrator id.
        Returns True on success. """
        self.history.savef(self.get_id(), csv)
        return True

    def loadf_history_json(self, filename):
        """ Load the calibration history from a json file. Returns True. """
        self.history.loadf_json(filename)
        return True

    def calibrate(self, time_stamp, data_bid, data_ask):
        """ Calibrate the model to bid and ask data observed at time_stamp.
        ---------
        args:
        time_stamp:  time point in float
        data_bid:    data on bid side at the time points
        data_ask:    data on ask side at the time points
        output:
        modelpar:    the calibrated parameters (implemented by subclasses)
        """
        pass
class OrderVolumeCalibrator(ModelCalibrator):
    """ This class calibrates a mean-reverting order volume model and stores its history
    ---------
    fields:
    calibratorid:        identifier string
    model:               current model to be calibrated
    estimator_dynamics:  function for estimation of (nu, mu, sigma)
    estimator_corr:      function for estimation of correlation (rho); if None, rho is set to 0.
    estimator_dyn_corr:  if this estimator is set then it is assumed to return all parameters, incl. correlation
    """

    def __init__(
            self,
            calibratorid="OrderVolumeCalibrator",
            model=None,
            estimator_dynamics=est.estimate_recgamma_diff,
            estimator_corr=None,
            estimator_dyn_corr=None
    ):
        self.calibratorid = calibratorid
        self.model = model
        if model is None:
            # Default to a mean-reverting order volume model
            self.model = OrderVolumeMeanRev()
        self.history = CalibrationHistory()
        self.estimator_dynamics = estimator_dynamics
        self.estimator_corr = estimator_corr
        # When set, this combined estimator supersedes estimator_dynamics and
        # estimator_corr; it must return (params_bid, params_ask, rho)
        self.estimator_full=estimator_dyn_corr

    def _calibrate_full(
            self,
            time_stamp,
            time_incr,
            data_bid,
            data_ask
    ):
        """ Calibration to data history for bid and ask side, estimating all
        parameters with the single combined estimator (self.estimator_full)
        ---------
        args:
        time_stamp:  time point in float
        time_incr:   time increment between consecutive points of the arrays
        data_bid:    volume on bid side at the time points
        data_ask:    volume on ask side at the time points
        ---------
        output:
        (params_bid, params_ask, rho): the calibrated parameters, each side in format (mu, nu, sigma)
        """
        params_bid, params_ask, rho = self.estimator_full(data_bid, data_ask, time_incr)
        self.model.dynamics_bid.set_params(params_bid)
        self.model.dynamics_ask.set_params(params_ask)
        self.model.set_rho(rho)
        # Record the freshly calibrated model state
        self.history.append(time_stamp, self.model)
        return params_bid, params_ask, rho

    def calibrate(
            self,
            time_stamp,
            time_incr,
            data_bid,
            data_ask
    ):
        """ Calibration to data history for bid and ask side
        ---------
        args:
        time_stamp:  time point in float
        time_incr:   time increment between consecutive points of the arrays
        data_bid:    volume on bid side at the time points
        data_ask:    volume on ask side at the time points
        ---------
        output:
        (params_bid, params_ask, rho): the calibrated parameters, each side in format (mu, nu, sigma);
        if estimator_full is set, the output will be the output of that function
        """
        if not (self.estimator_full is None):
            # A combined estimator was supplied; delegate the whole calibration
            return self._calibrate_full(
                time_stamp,
                time_incr,
                data_bid,
                data_ask
            )

        # Start the model at the last observed volumes
        self.model.set_z0((data_bid[-1], data_ask[-1]))
        # Estimate bid and ask dynamics independently
        params_bid = self.estimator_dynamics(data_bid, time_incr)
        self.model.dynamics_bid.set_params(params_bid)
        params_ask = self.estimator_dynamics(data_ask, time_incr)
        self.model.dynamics_ask.set_params(params_ask)
        if self.estimator_corr is None:
            self.model.set_rho(0.)
        else:
            self.model.set_rho(self.estimator_corr(data_bid, data_ask))
        self.history.append(time_stamp, self.model)

        return (params_bid, params_ask, self.model.get_rho())

    def calibrate_running_frame(
            self,
            time_start,
            time_discr,
            data_bid,
            data_ask,
            num_timepoints_calib,
            num_timepoints_recal=1,
    ):
        """
        Calibrates the model repeatedly on a running time frame over the data
        ----------------
        args:
        time_start:              float time point at which data starts (calibration will start num_timepoints_calib later)
        time_discr:              float time between 2 time points
        data_bid:                data bid side (uniform time grid, starting at time_start)
        data_ask:                data ask side (uniform time grid, starting at time_start)
        num_timepoints_calib:    number of time points to be used for each calibration
        num_timepoints_recal=1:  number of time points after which recalibration starts
        """
        # Convert to correct data type
        time_start = float(time_start)
        time_discr = float(time_discr)
        num_timepoints_calib = int(num_timepoints_calib)
        num_timepoints_recal = int(num_timepoints_recal)

        print("Start calibration in time frame: {}".format(self.calibratorid))
        # Slide a trailing window of num_timepoints_calib points over the data,
        # advancing num_timepoints_recal points per calibration
        for ctr_now in range(num_timepoints_calib-1, len(data_bid), num_timepoints_recal):
            # Calibrate on the window ending at ctr_now (inclusive)
            self.calibrate(
                time_start + ctr_now * time_discr,
                time_discr,
                data_bid[ctr_now - num_timepoints_calib+1:ctr_now+1:],
                data_ask[ctr_now - num_timepoints_calib+1:ctr_now+1:]
            )
        return

    def savef_history(self, csv=True):
        """ Saves the calibration history with a fixed parameter ordering. """
        self.history.savef(self.get_id(), csv, paramlist=["z0", "mu", "nu", "sigma"])
        return True
class LOBProfileCalibrator(ModelCalibrator):
    """ This class calibrates a model and stores its history
    ---------
    fields:
    calibratorid:    identifier string
    fitting_method:  valid choices are "LSQ", "LSQScaling", "TVLSQ", "TV-ArgMax", "TV-Rmax1"
    """

    def __init__(
            self,
            calibratorid="LOBProfileCalibrator",
            fitting_method="LSQ",
            model=None
    ):
        self.calibratorid = calibratorid
        self.fitting_method = fitting_method
        if model is None:
            self.model = LOBLinearTwoFactor()
        else:
            self.model = model
        self.history = CalibrationHistory()

    def calibrate(self, time_stamp, data_bid, data_ask):
        """ calibrates to data by fitting the method specified in .fitting_method
        ----------
        args:
        time_stamp:  time point in float
        data_bid
        data_ask:    bid and ask profiles to fit data to (array format)
        output:
        (z0, gamma) of the fitted model
        raises:
        ValueError if .fitting_method is not one of the supported choices
        """
        # BUGFIX: the original mixed 'if' and 'elif' chains and silently
        # ignored unknown fitting methods (still appending to the history);
        # use a single chain and fail loudly on unknown methods.
        if self.fitting_method == "LSQ":
            self.model.init_leastsq(data_bid, data_ask)
        elif self.fitting_method == "LSQScaling":
            self.model.init_leastsq(data_bid, data_ask, scaling=True)
        elif self.fitting_method == "TVLSQ":
            self.model.init_tv_leastsq(data_bid, data_ask)
        elif self.fitting_method == "TV-ArgMax":
            self.model.init_tv_argmax(data_bid, data_ask)
        elif self.fitting_method == "TV-Rmax1":
            self.model.init_tv_rmax1(data_bid, data_ask)
        else:
            raise ValueError("Unknown fitting method: {}".format(self.fitting_method))

        self.history.append(time_stamp, self.model)
        return self.model.get_z0(), self.model.get_gamma()
def fit_profile_to_data(bidprofile, askprofile):
    ''' Creates four LOBLinearTwoFactor models and fits each of them to the given
    bid and ask profiles, using (in order of the returned tuple) the TV least
    squares, plain least squares, TV argmax and TV rmax1 fitting methods. '''
    warnings.warn("Method fit_profile_to_data might be removed in future.", FutureWarning)

    tv_lsq_model = LOBLinearTwoFactor()
    lsq_model = LOBLinearTwoFactor()
    argmax_model = LOBLinearTwoFactor()
    rmax1_model = LOBLinearTwoFactor()

    tv_lsq_model.init_tv_leastsq(bidprofile, askprofile)
    lsq_model.init_leastsq(bidprofile, askprofile)
    argmax_model.init_tv_argmax(bidprofile, askprofile)
    rmax1_model.init_tv_rmax1(bidprofile, askprofile)

    return tv_lsq_model, lsq_model, argmax_model, rmax1_model
| StarcoderdataPython |
1798371 | #!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
from __future__ import absolute_import
__all__ = ["TaurusPlot"]
from future.utils import string_types
import copy
from taurus.external.qt import QtGui, Qt
from taurus.core.util.containers import LoopList
from taurus.core.util.log import Logger
from taurus.qt.qtcore.configuration import BaseConfigurableClass
from pyqtgraph import PlotWidget
from .curvespropertiestool import CurvesPropertiesTool
from .taurusmodelchoosertool import TaurusXYModelChooserTool
from .legendtool import PlotLegendTool
from .datainspectortool import DataInspectorTool
from .y2axis import Y2ViewBox
from .curveproperties import CURVE_COLORS
class TaurusPlot(PlotWidget, BaseConfigurableClass):
    """
    TaurusPlot is a general widget for plotting 1D data sets. It is an extended
    taurus-aware version of :class:`pyqtgraph.PlotWidget`.

    Apart from all the features already available in a regular PlotWidget,
    TaurusPlot incorporates the following tools/features:

        - Secondary Y axis (right axis)
        - A plot configuration dialog, and save/restore configuration
          facilities
        - A menu option for adding/removing models
        - A menu option for showing/hiding the legend
        - Automatic color change of curves for newly added models
    """

    def __init__(self, parent=None, **kwargs):
        if Qt.QT_VERSION < 0x050000:
            # Workaround for issue when using super with pyqt<5
            BaseConfigurableClass.__init__(self)
            PlotWidget.__init__(self, parent=parent, **kwargs)
        else:
            # NOTE(review): parent is not forwarded here (parent=None) while the
            # pyqt<5 branch does forward it -- confirm this is intentional.
            super(TaurusPlot, self).__init__(parent=None, **kwargs)

        # Compose with a Logger (delegate the common log methods)
        self._logger = Logger(name=self.__class__.__name__)
        self.debug = self._logger.debug
        self.info = self._logger.info
        self.warning = self._logger.warning
        self.error = self._logger.error

        # set up cyclic color generator; index -1 so the first curve gets color 0
        self._curveColors = LoopList(CURVE_COLORS)
        self._curveColors.setCurrentIndex(-1)

        # add save & retrieve configuration actions to the view box context menu
        menu = self.getPlotItem().getViewBox().menu
        saveConfigAction = QtGui.QAction("Save configuration", menu)
        saveConfigAction.triggered.connect(self._onSaveConfigAction)
        menu.addAction(saveConfigAction)

        loadConfigAction = QtGui.QAction("Retrieve saved configuration", menu)
        loadConfigAction.triggered.connect(self._onRetrieveConfigAction)
        menu.addAction(loadConfigAction)

        self.registerConfigProperty(self._getState, self.restoreState, "state")

        # add legend tool
        legend_tool = PlotLegendTool(self)
        legend_tool.attachToPlotItem(self.getPlotItem())

        # add model chooser (shares the cyclic color generator)
        self._model_chooser_tool = TaurusXYModelChooserTool(self)
        self._model_chooser_tool.attachToPlotItem(
            self.getPlotItem(), self, self._curveColors
        )

        # add Y2 axis
        self._y2 = Y2ViewBox()
        self._y2.attachToPlotItem(self.getPlotItem())

        # add plot configuration dialog
        self._cprop_tool = CurvesPropertiesTool(self)
        self._cprop_tool.attachToPlotItem(self.getPlotItem(), y2=self._y2)

        # add a data inspector
        inspector_tool = DataInspectorTool(self)
        inspector_tool.attachToPlotItem(self.getPlotItem())

        # enable Autorange on both view boxes
        self.getPlotItem().getViewBox().enableAutoRange(True)
        self._y2.enableAutoRange(True)

        # Register config properties (delegated to the individual tools)
        self.registerConfigDelegate(self._model_chooser_tool, "XYmodelchooser")
        self.registerConfigDelegate(self._y2, "Y2Axis")
        self.registerConfigDelegate(self._cprop_tool, "CurvePropertiesTool")
        self.registerConfigDelegate(legend_tool, "legend")
        self.registerConfigDelegate(inspector_tool, "inspector")

    # --------------------------------------------------------------------
    # workaround for bug in pyqtgraph v<=0.10.0, already fixed in
    # https://github.com/pyqtgraph/pyqtgraph/commit/52754d4859
    # TODO: remove this once pyqtgraph v>0.10 is released
    def __getattr__(self, item):
        try:
            return PlotWidget.__getattr__(self, item)
        except NameError:
            # pyqtgraph raises NameError instead of AttributeError here
            raise AttributeError(
                "{} has no attribute {}".format(self.__class__.__name__, item)
            )

    # --------------------------------------------------------------------

    def __getitem__(self, idx):
        """
        Provides a list-like interface: items can be accessed using slice
        notation
        """
        return self.getPlotItem().listDataItems()[idx]

    def __len__(self):
        return len(self.getPlotItem().listDataItems())

    def setModel(self, names):
        """Reimplemented to delegate to the model chooser (replaces models)"""
        # support passing a string in names
        if isinstance(names, string_types):
            names = [names]
        self._model_chooser_tool.updateModels(names)

    def addModels(self, names):
        """Reimplemented to delegate to the model chooser (appends models)"""
        # support passing a string in names
        if isinstance(names, string_types):
            names = [names]
        self._model_chooser_tool.addModels(names)

    def _getState(self):
        """Same as PlotWidget.saveState but removing viewRange conf to force
        a refresh with targetRange when loading
        """
        state = copy.deepcopy(self.saveState())
        # remove viewRange conf
        del state["view"]["viewRange"]
        return state

    def setXAxisMode(self, x_axis_mode):
        """Required generic TaurusPlot API.

        :param x_axis_mode: "t" for a date/time bottom axis, "n" for numeric
        :raises ValueError: on any other mode string
        """
        from taurus_pyqtgraph import DateAxisItem

        if x_axis_mode == "t":
            axis = DateAxisItem(orientation="bottom")
            axis.attachToPlotItem(self.getPlotItem())
        elif x_axis_mode == "n":
            # detach a previously attached DateAxisItem, if any
            axis = self.getPlotItem().axes["bottom"]["item"]
            if isinstance(axis, DateAxisItem):
                axis.detachFromPlotItem()
        else:
            raise ValueError("Unsupported x axis mode {}".format(x_axis_mode))

    def _onSaveConfigAction(self):
        """wrapper to avoid issues with overloaded signals"""
        return self.saveConfigFile()

    def _onRetrieveConfigAction(self):
        """wrapper to avoid issues with overloaded signals"""
        return self.loadConfigFile()
def plot_main(
    models=(),
    config_file=None,
    x_axis_mode="n",
    demo=False,
    window_name="TaurusPlot (pg)",
):
    """Launch a TaurusPlot widget in a standalone Taurus application.

    :param models: iterable of model names to plot
    :param config_file: path of a configuration file to restore, or None
    :param x_axis_mode: "t" for a date/time X axis, "n" for a numeric one
    :param demo: if True, add two demo curves to the plot
    :param window_name: title of the plot window
    """
    import sys
    from taurus.qt.qtgui.application import TaurusApplication

    app = TaurusApplication(cmd_line_parser=None, app_name="taurusplot(pg)")

    w = TaurusPlot()
    w.setWindowTitle(window_name)

    if demo:
        # Do not mutate the caller's sequence; extend a copy with demo curves
        models = list(models)
        models.extend(["eval:rand(100)", "eval:0.5*sqrt(arange(100))"])

    w.setXAxisMode(x_axis_mode.lower())

    if config_file is not None:
        w.loadConfigFile(config_file)

    if models:
        w.setModel(models)

    w.show()
    ret = app.exec_()
    sys.exit(ret)
if __name__ == '__main__':
    # Allow running this module directly as a standalone TaurusPlot application
    plot_main()
| StarcoderdataPython |
9649264 | <reponame>protonyx/labtronyx
"""
Getting Started
---------------
The typical use case for Labtronyx is as a library that can be imported and used to introduce automation capability
with external instruments. There are many cases, however, where automation with external instruments is the primary
goal of a Python script. For these cases, Labtronyx provides a Script class that handles much of the boiler-plate
operations needed to establish communication with an instrument.
Labtronyx scripts are objects that run commands sequentially from start to completion with an expected outcome. At the
end of a script, a PASS or FAIL designation is returned depending on pre-programmed conditions within the script. Any
class that subclasses the :class:`ScriptBase` class has access to a number of convenience functions to ease interaction
with external instruments or devices through the Labtronyx framework.
To create a Labtronyx script, all you need is a class that extends :class:`ScriptBase` and some code that
instructs the Labtronyx library to run the script::
import labtronyx
class TestScript(labtronyx.ScriptBase):
def run(self):
pass
if __name__ == '__main__':
labtronyx.runScriptMain()
To run this simple script, just execute the Python file from the command line.
Script Attributes
-----------------
Attributes provide additional information to Labtronyx about the script that can be used to catalog and identify
scripts in a large repository. It is recommended that scripts define these attributes:
* author
* version
* name
* description
* continueOnFail
* allowedFailures
Declaring Required Resources
----------------------------
Labtronyx is fundamentally an automation framework for instruments. Scripts that subclass :class:`ScriptBase` can
declare required resources by defining class attributes that instantiate the :class:`RequiredResource` class.::
import labtronyx
from labtronyx.script import ScriptBase, RequiredResource
class TestScript(ScriptBase):
dut = RequiredResource(deviceVendor='Test', deviceModel='Widget')
def run(self):
pass
if __name__ == '__main__':
labtronyx.runScriptMain()
The parameters for :class:`RequiredResource` are the same parameters passed to :func:`InstrumentManager.findResources`.
These parameters are matched against the key-value pairs returned by :func:`getProperties`. If the parameters are not
specific enough, Labtronyx may not be able to resolve the :class:`RequiredResource` attribute to one resource and cause
the script to FAIL because each required resource must resolve to exactly one resource. Resources can be force-resolved
by calling :func:`assignResource` with the attribute name and a UUID.
Parameters
----------
Scripts can specify parameters that must be provided when the script is opened by defining class attributes that
instantiate the :class:`ScriptParameter` class.::
import labtronyx
from labtronyx.script import ScriptBase, RequiredResource
class TestScript(ScriptBase):
param = ScriptParameter(attrType=str, required=False, defaultValue='Test')
def run(self):
pass
if __name__ == '__main__':
labtronyx.runScriptMain()
Script parameters are instantiated with the following parameters:
* `attrType` - Python class type
* `required` - True if the parameter is required, if False `defaultValue` can be specified
* `defaultValue` - Attribute value if not provided when the script is instantiated
Running Scripts
---------------
Labtronyx has a helper method that can be used to facilitate script execution from the command line::
if __name__ == '__main__':
labtronyx.runScriptMain()
Any file that contains exactly one subclass of :class:`ScriptBase` with that code snippet can be run from the
command line. Alternatively, the script can be instantiated and run by calling the `start` method::
ts = TestScript()
result = ts.start()
print "Script Result: " + result.result
Script Results
--------------
The default outcome or result of a script is a PASS designation. The developer is responsible for deciding when to
return a FAILURE. FAILURES can be set explicitly by calling the `fail` method, or by using on of the convenience
functions to FAIL on a certain condition. If the `continueOnFail` attribute is set, a FAILURE will not stop script
execution, but the outcome of the script will be reported as FAIL. If script execution needs to be stopped on a FAILURE
condition, the `stop` parameter of the `fail` method can be set, or any of the convenience functions beginning with
`assert` will cause execution to halt when the condition is met.
"""
import os
import time
import threading
import ctypes
import logging
# Package relative imports
from ..common.log import RotatingMemoryHandler, CallbackLogHandler
from ..common import events
from ..common.errors import *
from ..common.plugin import PluginBase, PluginAttribute, PluginParameter, PluginDependency
from .resource import ResourceBase
__all__ = ['ScriptBase', 'ScriptParameter', 'ScriptResult', 'RequiredResource']
class ScriptBase(PluginBase):
    """
    Script base class, modeled after the Python unittest framework.

    :param manager:         InstrumentManager instance
    :type manager:          labtronyx.InstrumentManager
    :param logger:          Logger instance
    :type logger:           logging.Logger
    """
    pluginType = 'script'  # plugin category used by the plugin framework

    # Descriptive metadata shown in script catalogs
    name = PluginAttribute(attrType=str, defaultValue='')
    description = PluginAttribute(attrType=str, defaultValue='')
    category = PluginAttribute(attrType=str, defaultValue='General')
    subcategory = PluginAttribute(attrType=str, defaultValue='')

    # Failure policy: when continueOnFail is False, allowedFailures is forced
    # to 0 during __init__
    continueOnFail = PluginAttribute(attrType=bool, defaultValue=False)
    allowedFailures = PluginAttribute(attrType=int, defaultValue=0)

    # When True, a file log handler is created for each script run
    logToFile = PluginAttribute(attrType=bool, defaultValue=True)
    def __init__(self, manager, **kwargs):
        """
        :param manager: InstrumentManager instance that owns this script
        """
        PluginBase.__init__(self, check_dependencies=False, **kwargs)
        self._manager = manager

        # A failure budget only makes sense when execution continues on failure
        if not self.continueOnFail:
            self.allowedFailures = 0

        self._scriptThread = None
        self._runLock = threading.Lock()  # guards against concurrent runs
        self._results = []
        self._status = ''
        self._progress = 0

        # Private (name-mangled) per-instance logger keyed by the plugin UUID
        self.__logger = logging.getLogger('labtronyx.%s' % self.uuid)
        self._formatter = logging.Formatter('%(asctime)s %(levelname)-8s - %(message)s')

        # Memory handler -- keeps the most recent records for getLog()
        self._handler_mem = RotatingMemoryHandler(100)
        self._handler_mem.setFormatter(self._formatter)
        self.__logger.addHandler(self._handler_mem)

        # ZMQ Event handler -- republishes each log record as a script.log event
        self._handler_evt = CallbackLogHandler(
            lambda record: self.manager._publishEvent(events.EventCodes.script.log, self.uuid, record)
        )
        self._handler_evt.setFormatter(self._formatter)
        self.__logger.addHandler(self._handler_evt)

        # File handler -- created lazily by createFileLogHandler()
        self._handler_file = None
    @property
    def manager(self):
        # InstrumentManager instance that owns this script
        return self._manager

    @property
    def result(self):
        # List of ScriptResult objects accumulated over runs
        return self._results

    @result.setter
    def result(self, new_result):
        # Assignment APPENDS a ScriptResult to the result list rather than
        # replacing it; anything else is rejected
        if isinstance(new_result, ScriptResult):
            self._results.append(new_result)
        else:
            raise TypeError("Result must be a ScriptResult type")

    @property
    def logger(self):
        # Script-specific logger (memory, event and optional file handlers)
        return self.__logger

    @property
    def current_test_result(self):
        # NOTE(review): assumes a script thread exists; accessing this before
        # start() has been called raises AttributeError -- confirm intended
        return self._scriptThread.result
def createFileLogHandler(self, filename=None):
"""
Create a file log handler to store script logs. Called automatically by the default :func:`setUp` method if
logToFile is True. Removes any existing file log handlers.
:param filename: filename of new log file
:type filename: str
"""
if self._handler_file is not None:
self.__logger.removeHandler(self._handler_file)
if filename is None:
filename = time.strftime("%Y%m%d-%H%M%S-" + self.fqn + ".log")
try:
import appdirs
dirs = appdirs.AppDirs("Labtronyx", roaming=True)
log_path = dirs.user_log_dir
if not os.path.exists(log_path):
os.makedirs(log_path)
filename = os.path.join(log_path, filename)
except:
pass
self.logger.info("Logging to file: %s", filename)
self._handler_file = logging.FileHandler(filename)
self._handler_file.setFormatter(self._formatter)
self.__logger.addHandler(self._handler_file)
def getLog(self):
"""
Get the last 100 log entries
:return: list
"""
return self._handler_mem.getBuffer()
def _validateParameters(self):
"""
Validate script parameters
:return: List of failure reasons, if any
:rtype: list[str]
"""
params = self._getClassAttributesByBase(ScriptParameter)
fails = []
for attr_name, attr_cls in params.items():
try:
attr_cls.validate(getattr(self, attr_name))
except Exception as e:
fails.append("ERROR: Script parameter %s %s" % (attr_name, e.message))
return fails
    @classmethod
    def getParameterInfo(cls):
        """
        Get information about ScriptParameter objects declared on the class.

        :rtype: dict{str: dict}
        """
        param_classes = cls._getClassAttributesByBase(ScriptParameter)
        return {p_name: p_cls.getDict() for p_name, p_cls in param_classes.items()}

    def getParameters(self):
        """
        Get script instance parameter values, keyed by attribute name.

        :rtype: dict{str: object}
        """
        param_classes = self._getClassAttributesByBase(ScriptParameter)
        return {attr_name: self._getAttributeValue(attr_name) for attr_name in param_classes}

    @classmethod
    def getResourceInfo(cls):
        """
        Get information about RequiredResource objects declared on the class.

        :rtype: dict{str: dict}
        """
        param_classes = cls._getClassAttributesByBase(RequiredResource)
        return {p_name: p_cls.getDict() for p_name, p_cls in param_classes.items()}
    def resolveResources(self):
        """
        Attempt to resolve all resource dependencies by iterating through all RequiredResource attributes and finding
        matching resource objects
        """
        self._resolveDependencies(check_dependencies=False)

    def _validateResources(self):
        """
        Validate resource dependencies. Each RequiredResource attribute must
        resolve to exactly one resource.

        :return: List of failure reasons, if any
        :rtype: list[str]
        """
        req_res = self.getResourceResolutionInfo()

        fails = []

        for attr_name, res_list in req_res.items():
            if len(res_list) == 0:
                fails.append("ERROR: Required resource %s could not resolve to any resource" % attr_name)
            elif len(res_list) > 1:
                fails.append("ERROR: Required resource %s matches more than one resource" % attr_name)

        return fails
    def assignResource(self, res_attribute, res_uuid):
        """
        Assign a resource with a given uuid to the script attribute `res_attribute`. Used if a resource could not be
        resolved to a single resource.

        :param res_attribute: Script attribute name
        :param res_uuid: Resource UUID
        :raises: KeyError if res_attribute is not a valid RequiredResource attribute
        :raises: ResourceUnavailable if res_uuid is not a valid resource
        """
        res_info = self.getResourceResolutionInfo()
        plug = self.manager.plugin_manager.getPluginInstance(res_uuid)

        # Validate both inputs before mutating any state
        if res_attribute not in res_info:
            raise KeyError("Resource attribute is not valid")
        if plug is None:
            raise ResourceUnavailable("Resource could not be found")

        setattr(self, res_attribute, plug)

        # Notify listeners that the script's resource assignment changed
        self.manager._publishEvent(events.EventCodes.script.changed, self.uuid)
def getResourceResolutionInfo(self):
"""
Get RequiredResource resolution information. Returns a dict with the attribute names as the keys and a list of
resolve Resource UUIDs as the value.
:rtype: dict{str: list}
"""
res_dict = {}
for attr_name, resolution in self._getAttributesByBase(RequiredResource).items():
if issubclass(type(resolution), ResourceBase):
# Resolved correctly
res_dict[attr_name] = [resolution.uuid]
elif type(resolution) in [list, tuple]:
res_dict[attr_name] = [res.uuid for res in resolution]
return res_dict
    @classmethod
    def getClassAttributes(cls):
        """
        Get a dictionary of all class attributes, augmented with script-specific
        resource and parameter metadata.

        :rtype: dict{str: object}
        """
        attr = super(ScriptBase, cls).getClassAttributes()
        attr['resources'] = cls.getResourceInfo()
        attr['parameters'] = cls.getParameterInfo()
        return attr

    def getProperties(self):
        """
        Get script instance properties, including run state, progress and
        accumulated results.

        :rtype: dict{str: object}
        """
        props = super(ScriptBase, self).getProperties()
        props.update(self.getAttributes())
        props.update({
            'ready': self.isReady(),
            'running': self.isRunning(),
            'status': self._status,
            'progress': self._progress,
            'results': [result.toDict() for result in self.result],
            'resources': self.getResourceResolutionInfo()
        })
        return props
def isReady(self):
"""
Check if a script is ready to run. In order to run, a script must meet the following conditions:
* All resource dependencies must be resolved.
:return: True if ready, False if not ready
:rtype: bool
"""
return len(self._validateResources()) == 0
def isRunning(self):
"""
Check if the script is currently running.
:rtype: bool
"""
running = self._runLock.acquire(False)
if running: # lock was acquired
self._runLock.release()
return not running
def start(self):
"""
Script run routine to be called when executing the script. Returns the script result as a `ScriptResult` object.
`run` is protected from multiple thread execution using a lock.
:rtype: ScriptResult
"""
if self.isRunning():
raise RuntimeError("Script already running")
self._scriptThread = ScriptThread(self)
self._scriptThread.setDaemon(True)
self._scriptThread.start()
def stop(self):
"""
Stop a script that is running.
:return: True if script was stopped
:rtype: bool
"""
if self.isRunning():
return self._scriptThread.kill(ScriptStopException)
def join(self, timeout=None):
"""
Wait until the script thread has completed and is no longer alive.
:param timeout: timeout before returning
:type timeout: float
"""
if isinstance(self._scriptThread, ScriptThread) and self._scriptThread.isAlive():
self._scriptThread.join(timeout)
def setUp(self):
"""
Method called to prepare the script for execution. `setUp` is called immediately before `run`. Any exception
raised will cause script FAILURE and the `run` method will not be called.
Default behavior is to validate all `RequiredResource` and `ScriptParameter` objects and FAIL script if
resources could not be resolved or required parameters were not found.
This method can be overriden to change the behavior.
"""
if self.logToFile:
self.createFileLogHandler()
self._handler_mem.flush() # Clear log buffer
self.logger.info("Running script: %s", self.fqn)
spinUpFailures = []
spinUpFailures += self._validateParameters()
spinUpFailures += self._validateResources()
for error_str in spinUpFailures:
self.logger.error(error_str)
if len(spinUpFailures) > 0:
self.fail("Errors encountered during script setUp", True)
# Notify that the script is running now
self.setProgress(0)
self.setStatus('Running')
def tearDown(self):
"""
Method called after `run` has been called and after `onPass`, `onSkip` or `onFail` have been called, depending
on the result of the script.
Default behavior is to log script completion information like script result, failure reason, execution time,
etc.
This method can be overriden to change the behavior.
"""
self.setProgress(100)
self.setStatus('Finished')
self.manager._publishEvent(events.EventCodes.script.finished, self.uuid)
if self.current_test_result.executionTime > 0:
self.logger.info("Script Execution Time: %f", self.current_test_result.executionTime)
self.logger.info("Script Result: %s", self.current_test_result.result)
if self.current_test_result.result == ScriptResult.FAIL:
self.logger.info("Failure Reason: %s", self.current_test_result.reason)
self.logger.info("Failure Count: %d", self.current_test_result.failCount)
    def run(self):
        """
        Main script body; subclasses override this with the actual work.
        Any exceptions raised are caught by the runner and may cause script
        FAILURE (depending on the behavior of `onException`). The base
        implementation intentionally does nothing.
        """
        pass
def onException(self, e):
"""
Method called when an unhandled exception is caught. Default behavior is to log the exception and FAIL the
script. When called, script execution has already halted, there is no way to continue execution.
This method can be overriden to change the behavior.
:param e: Exception caught
:type e: Exception
"""
self.current_test_result.result = ScriptResult.FAIL
self.current_test_result.addFailure("Unhandled Exception: %s" % type(e))
self.logger.exception(self.current_test_result.reason)
    def onPass(self):
        """
        Hook invoked when execution finished with a PASS result, before
        `tearDown`. Default behavior is a no-op; override to add custom
        success handling.
        """
        pass
    def onSkip(self):
        """
        Hook invoked when execution halted with a SKIP result, before
        `tearDown`. Default behavior is a no-op; override to add custom
        skip handling.
        """
        pass
    def onFail(self):
        """
        Hook invoked when execution halted with a FAIL result. Called after
        `onException` (if an exception occurred) but before `tearDown`.
        Default behavior is a no-op; override to add custom failure handling.
        """
        pass
def setProgress(self, new_progress):
"""
Optional method to set the progress of a script. Useful for external tools or GUIs to report script progress.
:param new_progress: Progress (out of 100)
:type new_progress: int
"""
self._progress = max(0, min(int(new_progress), 100))
self.manager._publishEvent(events.EventCodes.script.changed, self.uuid)
    def setStatus(self, new_status):
        """
        Optional method to set the text status of the script. Useful for external tools or GUIs to report script status.
        Use in conjunction with `setProgress`
        :param new_status: Status
        :type new_status: str
        """
        # str() guards against callers passing non-string statuses.
        self._status = str(new_status)
        self.manager._publishEvent(events.EventCodes.script.changed, self.uuid)
def fail(self, reason, stop=False):
"""
Set the script result to FAIL. Execution will halt on the following conditions:
* `continueOnFail` attribute is False
* `allowedFailures` attribute has been exceeded
* `stop` parameter is True
:param reason: Reason for script failure
:type reason: str
:param stop: Flag to stop script execution
:type stop: bool
"""
self.current_test_result.result = ScriptResult.FAIL
self.current_test_result.addFailure(reason)
self.logger.info("FAILURE: %s", reason)
if stop:
raise ScriptStopException("Script failure, see failure reason")
elif self.current_test_result.failCount > self.allowedFailures:
raise ScriptStopException("Failure count exceeded allowed failures")
def skip(self, reason):
"""
Set the script result to SKIP and halt execution.
:param reason: Reason for script failure
:type reason: str
"""
self.current_test_result.result = ScriptResult.SKIP
self.current_test_result.addFailure(reason)
raise ScriptStopException("Skipped")
    def assertEqual(self, a, b, msg=None):
        """Fail and stop the script immediately unless ``a == b``."""
        self.expectEqual(a, b, msg, True)
    def assertNotEqual(self, a, b, msg=None):
        """Fail and stop the script immediately unless ``a != b``."""
        self.expectNotEqual(a, b, msg, True)
    def assertTrue(self, a, msg=None):
        """Fail and stop the script immediately unless ``a`` is truthy."""
        self.expectTrue(a, msg, True)
    def assertFalse(self, a, msg=None):
        """Fail and stop the script immediately unless ``a`` is falsy."""
        self.expectFalse(a, msg, True)
    def assertIn(self, object, container, msg=None):
        """Fail and stop the script immediately unless ``object in container``."""
        self.expectIn(object, container, msg, True)
def assertNotIn(self, object, container, msg):
self.expectNotIn(object, container, msg, True)
def expectEqual(self, a, b, msg=None, stop=False):
if msg is None:
msg = "%s != %s" % (a, b)
if a != b:
self.fail(msg, stop)
def expectNotEqual(self, a, b, msg=None, stop=False):
if msg is None:
msg = "%s == %s" % (a, b)
if a == b:
self.fail(msg, stop)
def expectTrue(self, a, msg=None, stop=False):
if msg is None:
msg = "%s is not True" % a
if not a:
self.fail(msg, stop)
def expectFalse(self, a, msg=None, stop=False):
if msg is None:
msg = "%s is not False" % a
if a:
self.fail(msg, stop)
def expectIn(self, object, container, msg=None, stop=False):
if msg is None:
msg = "%s not in %s" % (object, container)
if object not in container:
self.fail(msg, stop)
def expectNotIn(self, object, container, msg=None, stop=False):
if msg is None:
msg = "%s in %s" % (object, container)
if object in container:
self.fail(msg, stop)
class ScriptThread(threading.Thread):
    """
    Worker thread that drives one ScriptBase instance through its full
    life-cycle (setUp -> run -> result hook -> tearDown) and records the
    outcome in a ScriptResult.
    """
    def __init__(self, scriptObj):
        # Explicit TypeError instead of an assert, which disappears under -O.
        if not isinstance(scriptObj, ScriptBase):
            raise TypeError("scriptObj must be a ScriptBase instance")
        super(ScriptThread, self).__init__()
        self.__scriptObj = scriptObj
        self.__scriptResult = ScriptResult()
        # Assign the `name` attribute instead of the deprecated setName().
        self.name = 'ScriptThread-%s' % self.script.uuid

    @property
    def script(self):
        """The ScriptBase instance this thread executes."""
        return self.__scriptObj

    @property
    def result(self):
        """The ScriptResult accumulated during this run."""
        return self.__scriptResult

    def kill(self, exc_type):
        """
        Called asyncronously to kill a thread by raising an exception using the Python API and ctypes.
        .. note::
            If there is a profiler or debugger attached to the Python interpreter, there is a high chance this will
            not work.
        :param exc_type: Exception to throw
        :type exc_type: type(Exception)
        :returns: True if successful, False otherwise (None if the thread is not alive)
        :rtype: bool
        """
        if self.is_alive():
            self.result.result = ScriptResult.STOPPED
            # Thread idents may not fit in a C int on 64-bit platforms; pass an
            # explicit c_long as PyThreadState_SetAsyncExc expects.
            tid = ctypes.c_long(self.ident)
            res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exc_type))
            if res != 1:
                # More than one (or zero) thread states were modified: undo and report failure.
                ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
                return False
            else:
                return True

    def run(self):
        """Execute the script: setUp/run, then the matching result hook and tearDown."""
        self.result.startTimer()
        lockAcq = self.script._runLock.acquire(False)
        if not lockAcq:
            raise threading.ThreadError
        try:
            self.script.setUp()
            self.script.run()
        except ScriptStopException as e:
            # str(e) replaces the Python-2-only e.message attribute.
            self.script.logger.info("Script Stopped: %s", str(e))
            self.result.reason = "Script stopped"
        except Exception as e:
            # Handle all uncaught exceptions via the script's hook.
            self.script.onException(e)
        finally:
            if self.result.result == ScriptResult.PASS:
                self.script.onPass()
            elif self.result.result == ScriptResult.FAIL:
                self.script.onFail()
            elif self.result.result == ScriptResult.SKIP:
                self.script.onSkip()
            self.script.tearDown()
            self.script._runLock.release()
            self.result.stopTimer()
            self.script.result = self.result
class RequiredResource(PluginDependency):
    """Declares a resource plugin that a script needs in order to run."""
    def __init__(self, **kwargs):
        # Force the dependency to resolve against resource-type plugins only.
        kwargs['pluginType'] = 'resource'
        super(RequiredResource, self).__init__(**kwargs)
    def getDict(self):
        """Return the raw dependency attribute dictionary."""
        return self.attrs
class ScriptParameter(PluginParameter):
    """A user-configurable script parameter; behavioral alias of PluginParameter."""
    pass
class ScriptResult(object):
    """
    Accumulates the outcome of one script execution: a result code, failure
    messages, an optional explicit reason, and start/stop timestamps.
    """
    PASS = 'PASS'
    FAIL = 'FAIL'
    SKIP = 'SKIP'
    STOPPED = 'STOPPED'

    def __init__(self):
        self._result = self.PASS
        self._reason = ''
        self._startTime = 0
        self._stopTime = 0
        self._failures = []

    @property
    def result(self):
        """Current result code (one of PASS/FAIL/SKIP/STOPPED)."""
        return self._result

    @result.setter
    def result(self, value):
        # Reject anything that is not one of the recognized result codes.
        if value not in (self.PASS, self.FAIL, self.SKIP, self.STOPPED):
            raise ValueError("Invalid result type")
        self._result = value

    @property
    def reason(self):
        """Explicit reason if one was set, otherwise the most recent failure."""
        if self._reason != '':
            return self._reason
        return self._failures[-1] if self._failures else ''

    @reason.setter
    def reason(self, value):
        self._reason = value

    @property
    def failCount(self):
        """Number of failures recorded so far."""
        return len(self._failures)

    def addFailure(self, msg):
        """Append one failure message to the failure log."""
        self._failures.append(msg)

    def startTimer(self):
        """Record the wall-clock start of execution."""
        self._startTime = time.time()

    def stopTimer(self):
        """Record the wall-clock end of execution."""
        self._stopTime = time.time()

    @property
    def startTime(self):
        """Timestamp captured by startTimer()."""
        return self._startTime

    @property
    def executionTime(self):
        """Elapsed seconds between startTimer() and stopTimer()."""
        return self._stopTime - self._startTime

    def toDict(self):
        """Serialize this result to a plain dictionary."""
        return {
            'time': self.startTime,
            'result': self.result,
            'reason': self.reason,
            'failCount': self.failCount,
            'executionTime': self.executionTime,
        }
class ScriptStopException(RuntimeError):
    """Raised internally to halt script execution (fail-stop, skip, or async kill)."""
    pass
| StarcoderdataPython |
# File: .sagemathcloud/sage_parsing.py
"""
sage_parser.py
Code for parsing Sage code blocks sensibly.
"""
#########################################################################################
# Copyright (C) 2013 <NAME> <<EMAIL>> #
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
import string
import traceback
def get_input(prompt):
    """
    Read one logical block of code from stdin (Python 2: uses raw_input).

    If the first line ends with a colon (it opens a suite), keep prompting
    with '... ' and append indented continuation lines until a blank line or
    EOF is entered.

    :return: the (possibly multi-line) input string, or None on initial EOF
    """
    try:
        r = raw_input(prompt)
        # Removed a dead `quit = True` flag (never read, shadowed a builtin)
        # and a redundant `z = r` alias from the original implementation.
        if r.rstrip().endswith(':'):
            while True:
                try:
                    z = raw_input('... ')
                except EOFError:
                    break
                if z != '':
                    r += '\n ' + z
                else:
                    break
        return r
    except EOFError:
        return None
#def strip_leading_prompts(code, prompts=['sage:', '....:', '...:', '>>>', '...']):
# code, literals, state = strip_string_literals(code)
# code2 = []
# for line in code.splitlines():
# line2 = line.lstrip()
# for p in prompts:
# if line2.startswith(p):
# line2 = line2[len(p):]
# if p[0] != '.':
# line2 = line2.lstrip()
# break
# code2.append(line2)
# code = ('\n'.join(code2))%literals
# return code
def preparse_code(code):
    """Run the Sage preparser over ``code``, ignoring any sage:/... prompts."""
    import sage.all_cmdline
    return sage.all_cmdline.preparse(code, ignore_prompts=True)
def strip_string_literals(code, state=None):
    """
    Replace every string literal and comment in ``code`` with a %-template
    placeholder (``%(L1)s``, ``%(L2)s``, ...).

    Returns ``(new_code, literals, state)`` where ``new_code % literals``
    reconstructs the original text, ``literals`` maps each label to the
    removed literal/comment text, and ``state = (in_quote, raw)`` lets a
    caller resume scanning mid-literal on a subsequent chunk of code. Bare
    ``%`` characters in the surviving code are doubled so the later template
    substitution is safe.
    """
    new_code = []
    literals = {}
    counter = 0
    start = q = 0
    if state is None:
        in_quote = False
        raw = False
    else:
        in_quote, raw = state
    while True:
        # Locate the next quote (single or double) and comment marker.
        sig_q = code.find("'", q)
        dbl_q = code.find('"', q)
        hash_q = code.find('#', q)
        q = min(sig_q, dbl_q)
        if q == -1: q = max(sig_q, dbl_q)
        if not in_quote and hash_q != -1 and (q == -1 or hash_q < q):
            # it's a comment
            newline = code.find('\n', hash_q)
            if newline == -1: newline = len(code)
            counter += 1
            label = "L%s" % counter
            literals[label] = code[hash_q:newline] # changed from sage
            new_code.append(code[start:hash_q].replace('%','%%'))
            new_code.append("%%(%s)s" % label)
            start = q = newline
        elif q == -1:
            # No more quotes: flush the remainder (as a literal if we are
            # still inside an unterminated string, otherwise as code).
            if in_quote:
                counter += 1
                label = "L%s" % counter
                literals[label] = code[start:]
                new_code.append("%%(%s)s" % label)
            else:
                new_code.append(code[start:].replace('%','%%'))
            break
        elif in_quote:
            # Skip escaped quotes: an odd number of backslashes escapes it.
            if code[q-1] == '\\':
                k = 2
                while code[q-k] == '\\':
                    k += 1
                if k % 2 == 0:
                    q += 1
            if code[q:q+len(in_quote)] == in_quote:
                # Matching closing delimiter found: emit the whole literal.
                counter += 1
                label = "L%s" % counter
                literals[label] = code[start:q+len(in_quote)]
                new_code.append("%%(%s)s" % label)
                q += len(in_quote)
                start = q
                in_quote = False
            else:
                q += 1
        else:
            # Opening a new string literal; detect raw prefix and triple quotes.
            raw = q>0 and code[q-1] in 'rR'
            if len(code) >= q+3 and (code[q+1] == code[q] == code[q+2]):
                in_quote = code[q]*3
            else:
                in_quote = code[q]
            new_code.append(code[start:q].replace('%', '%%'))
            start = q
            q += len(in_quote)
    return "".join(new_code), literals, (in_quote, raw)
def end_of_expr(s):
"""
The input string s is a code expression that contains no strings (they have been stripped).
Find the end of the expression that starts at the beginning of s by finding the first whitespace
at which the parenthesis and brackets are matched.
The returned index is the position *after* the expression.
"""
i = 0
parens = 0
brackets = 0
while i<len(s):
c = s[i]
if c == '(':
parens += 1
elif c == '[':
brackets += 1
elif c == ')':
parens -= 1
elif c == ']':
brackets -= 1
elif parens == 0 and brackets == 0 and (c == ' ' or c == '\t'):
return i
i += 1
return i
# NOTE: The dec_args dict will leak memory over time. However, it only
# contains code that was entered, so it should never get big. It
# seems impossible to know for sure whether a bit of code will be
# eventually needed later, so this leakiness seems necessary.
# dec_counter hands out a unique id for each registered code decorator;
# dec_args maps that id to ([decorator expression], decorated code).
dec_counter = 0
dec_args = {}
# Divide the input code (a string) into blocks of code.
def divide_into_blocks(code):
    """
    Split ``code`` (a string) into a list of executable blocks.

    Returns a list of ``[start_line, stop_line, block_text]`` triples. String
    literals are protected during parsing, %%-style cell decorators are
    rewritten into ``salvus.execute_with_code_decorators`` calls (registered
    in the module-level ``dec_args``), and blocks that cannot stand alone
    (``except``/``finally``/``else``/``elif`` clauses and decorated ``def``)
    are merged with the preceding block.

    NOTE: this is Python 2 code (``except Exception, mesg`` syntax below).
    """
    global dec_counter
    # strip string literals from the input, so that we can parse it without having to worry about strings
    code, literals, state = strip_string_literals(code)
    # divide the code up into line lines.
    code = code.splitlines()
    # Compute the line-level code decorators.
    c = list(code)
    try:
        v = []
        for line in code:
            done = False
            # Transform shell escape into sh decorator.
            if line.lstrip().startswith('!'):
                line = line.replace('!', "%%sh ", 1)
            # Check for cell decorator
            # NOTE: strip_string_literals maps % to %%, because %foo is used for python string templating.
            if line.lstrip().startswith('%%'):
                i = line.find("%")
                j = end_of_expr(line[i+2:]) + i+2 + 1 # +1 for the space or tab delimiter
                expr = line[j:]%literals
                # Special case -- if % starts line *and* expr is empty (or a comment),
                # then code decorators impacts the rest of the code.
                sexpr = expr.strip()
                if i == 0 and (len(sexpr) == 0 or sexpr.startswith('#')):
                    new_line = '%ssalvus.execute_with_code_decorators(*_salvus_parsing.dec_args[%s])'%(line[:i], dec_counter)
                    expr = ('\n'.join(code[len(v)+1:]))%literals
                    done = True
                else:
                    # Expr is nonempty -- code decorator only impacts this line
                    new_line = '%ssalvus.execute_with_code_decorators(*_salvus_parsing.dec_args[%s])'%(line[:i], dec_counter)
                dec_args[dec_counter] = ([line[i+2:j]%literals], expr)
                dec_counter += 1
            else:
                new_line = line
            v.append(new_line)
            if done:
                break
        code = v
    except Exception, mesg:
        # Decorator rewriting is best-effort: on any failure, fall back to
        # the untouched line list.
        code = c
    ## Tested this: Completely disable block parsing:
    ## but it requires the caller to do "exec compile(block+'\n', '', 'exec') in namespace, locals", which means no display hook,
    ## so "2+2" breaks.
    ## return [[0,len(code)-1,('\n'.join(code))%literals]]
    # take only non-empty lines now for Python code.
    code = [x for x in code if x.strip()]
    # Compute the blocks, scanning bottom-up: a block extends upward while
    # lines are indented/continuations or while brackets remain unbalanced.
    i = len(code)-1
    blocks = []
    while i >= 0:
        stop = i
        paren_depth = code[i].count('(') - code[i].count(')')
        brack_depth = code[i].count('[') - code[i].count(']')
        curly_depth = code[i].count('{') - code[i].count('}')
        while i>=0 and ((len(code[i]) > 0 and (code[i][0] in string.whitespace or code[i][:2] == '%(')) or paren_depth < 0 or brack_depth < 0 or curly_depth < 0):
            i -= 1
            if i >= 0:
                paren_depth += code[i].count('(') - code[i].count(')')
                brack_depth += code[i].count('[') - code[i].count(']')
                curly_depth += code[i].count('{') - code[i].count('}')
        # remove comments
        for k, v in literals.iteritems():
            if v.startswith('#'):
                literals[k] = ''
        block = ('\n'.join(code[i:]))%literals
        bs = block.strip()
        if bs: # has to not be only whitespace
            blocks.insert(0, [i, stop, bs])
        code = code[:i]
        i = len(code)-1
    # merge try/except/finally/decorator/else/elif blocks
    i = 1
    def merge():
        "Merge block i-1 with block i."
        blocks[i-1][-1] += '\n' + blocks[i][-1]
        blocks[i-1][1] = blocks[i][1]
        del blocks[i]
    while i < len(blocks):
        s = blocks[i][-1].lstrip()
        # finally/except lines after a try
        if (s.startswith('finally') or s.startswith('except')) and blocks[i-1][-1].lstrip().startswith('try'):
            merge()
        # function definitions
        elif s.startswith('def') and blocks[i-1][-1].splitlines()[-1].lstrip().startswith('@'):
            merge()
        # lines starting with else conditions (if *and* for *and* while!)
        elif s.startswith('else') and (blocks[i-1][-1].lstrip().startswith('if') or blocks[i-1][-1].lstrip().startswith('while') or blocks[i-1][-1].lstrip().startswith('for') or blocks[i-1][-1].lstrip().startswith('elif')):
            merge()
        # lines starting with elif
        elif s.startswith('elif') and blocks[i-1][-1].lstrip().startswith('if'):
            merge()
        # do not merge blocks -- move on to next one
        else:
            i += 1
    return blocks
############################################
CHARS0 = string.ascii_letters + string.digits + '_'
CHARS = CHARS0 + '.'
def guess_last_expression(obj): # TODO: bad guess -- need to use a parser to go any further.
    """Return the trailing run of identifier/attribute characters in ``obj``
    (letters, digits, underscore, and dots); empty string if none."""
    cut = len(obj)
    while cut > 0 and obj[cut - 1] in CHARS:
        cut -= 1
    return obj[cut:]
def is_valid_identifier(target):
    """Return True when ``target`` is a syntactically valid Python identifier:
    nonempty, starts with a letter or underscore, and contains only letters,
    digits, and underscores."""
    if not target:
        return False
    if target[0] not in string.ascii_letters + '_':
        return False
    return all(ch in CHARS0 for ch in target)
# Keywords from http://docs.python.org/release/2.7.2/reference/lexical_analysis.html
# Static completion pool: every builtin name plus the Python 2 keyword list.
# NOTE: Python-2-only expression (dict.keys() returns a list that can be
# concatenated with a list literal).
_builtin_completions = __builtins__.keys() + ['and', 'del', 'from', 'not', 'while', 'as', 'elif', 'global', 'or', 'with', 'assert', 'else', 'if', 'pass', 'yield', 'break', 'except', 'import', 'print', 'class', 'exec', 'in', 'raise', 'continue', 'finally', 'is', 'return', 'def', 'for', 'lambda', 'try']
def introspect(code, namespace, preparse=True):
    """
    INPUT:
    - code -- a string containing Sage (if preparse=True) or Python code.
    - namespace -- a dictionary to complete in (we also complete using
      builtins such as 'def', 'for', etc.
    - preparse -- a boolean
    OUTPUT:
    An object: {'result':, 'target':, 'expr':, 'status':, 'get_help':, 'get_completions':, 'get_source':}

    NOTE: Python 2 module (``except E, v`` and ``exec ... in`` syntax below);
    evaluation of the user expression is time-boxed to 1 second via SIGALRM.
    """
    # result: the docstring, source code, or list of completions (at
    # return, it might thus be either a list or a string)
    result = []
    # expr: the part of code that is used to do the completion, e.g.,
    # for 'a = n.m.foo', expr would be 'n.m.foo'. It can be more complicated,
    # e.g., for '(2+3).foo.bar' it would be '(2+3).foo'.
    expr = ''
    # target: for completions, target is the part of the code that we
    # complete on in the namespace defined by the object right before
    # it, e.g., for n.m.foo, the target is "foo". target is the empty
    # string for source code and docstrings.
    target = ''
    # When returning, exactly one of the following will be true:
    get_help = False # getting docstring of something
    get_source = False # getting source code of a function
    get_completions = True # getting completions of an identifier in some namespace
    try:
        # Strip all strings from the code, replacing them by template
        # symbols; this makes parsing much easier.
        code0, literals, state = strip_string_literals(code.strip()) # we strip, since trailing space could cause confusion below
        # Move i so that it points to the start of the last expression in the code.
        # (TODO: this should probably be replaced by using ast on preparsed version. Not easy.)
        i = max([code0.rfind(t) for t in '\n;='])+1
        while i<len(code0) and code0[i] in string.whitespace:
            i += 1
        # Break the line in two pieces: before_expr | expr; we may
        # need before_expr in order to evaluate and make sense of
        # expr. We also put the string literals back in, so that
        # evaluation works.
        expr = code0[i:]%literals
        before_expr = code0[:i]%literals
        if '.' not in expr and '(' not in expr and ')' not in expr and '?' not in expr:
            # Easy case: this is just completion on a simple identifier in the namespace.
            get_help = False; get_completions = True; get_source = False
            target = expr
        else:
            # Now for all of the other harder cases.
            i = max([expr.rfind(s) for s in '?('])
            if i >= 1 and i == len(expr)-1 and expr[i-1] == '?': # expr ends in two ?? -- source code
                get_source = True; get_completions = False; get_help = False
                target = ""
                obj = expr[:i-1]
            elif i == len(expr)-1: # ends in ( or ? (but not ??) -- docstring
                get_help = True; get_completions = False; get_source = False
                target = ""
                obj = expr[:i]
            else: # completions (not docstrings or source)
                get_help = False; get_completions = True; get_source = False
                i = expr.rfind('.')
                target = expr[i+1:]
                if target == '' or is_valid_identifier(target):
                    obj = expr[:i]
                else:
                    # Target is not a clean identifier; fall back to guessing
                    # the last expression and re-splitting on the final dot.
                    expr = guess_last_expression(target)
                    i = expr.rfind('.')
                    if i != -1:
                        target = expr[i+1:]
                        obj = expr[:i]
                    else:
                        target = expr
        if get_completions and target == expr:
            # Pure-identifier completion: match against the namespace keys
            # plus builtins/keywords, returning only the suffixes.
            j = len(expr)
            v = [x[j:] for x in (namespace.keys() + _builtin_completions) if x.startswith(expr)]
        else:
            # We will try to evaluate
            # obj. This is dangerous and a priori could take
            # forever, so we spend at most 1 second doing this --
            # if it takes longer a signal kills the evaluation.
            # Obviously, this could in fact lock if
            # non-interruptable code is called, which should be rare.
            O = None
            try:
                import signal
                def mysig(*args): raise KeyboardInterrupt
                signal.signal(signal.SIGALRM, mysig)
                signal.alarm(1)
                import sage.all_cmdline
                if before_expr.strip():
                    try:
                        exec (before_expr if not preparse else preparse_code(before_expr)) in namespace
                    except Exception, msg:
                        pass
                    # uncomment for debugging only
                    # traceback.print_exc()
                # We first try to evaluate the part of the expression before the name
                try:
                    O = eval(obj if not preparse else preparse_code(obj), namespace)
                except SyntaxError:
                    # If that fails, we try on a subexpression.
                    # TODO: This will not be needed when
                    # this code is re-written to parse using an
                    # AST, instead of using this lame hack.
                    obj = guess_last_expression(obj)
                    O = eval(obj if not preparse else preparse_code(obj), namespace)
            finally:
                signal.signal(signal.SIGALRM, signal.SIG_IGN)
            def get_file():
                # Best-effort lookup of the source filename of O.
                try:
                    import sage.misc.sageinspect
                    return " File: " + eval('getdoc(O)', {'getdoc':sage.misc.sageinspect.sage_getfile, 'O':O}) + "\n"
                except Exception, err:
                    return "Unable to read source filename (%s)"%err
            if get_help:
                import sage.misc.sageinspect
                result = get_file()
                try:
                    def f(s):
                        # Render the call signature from argspec data, then
                        # append the docstring.
                        x = sage.misc.sageinspect.sage_getargspec(s)
                        defaults = list(x.defaults) if x.defaults else []
                        args = list(x.args) if x.defaults else []
                        v = []
                        if x.keywords:
                            v.insert(0,'**kwds')
                        if x.varargs:
                            v.insert(0,'*args')
                        while defaults:
                            d = defaults.pop()
                            k = args.pop()
                            v.insert(0,'%s=%r'%(k,d))
                        v = args + v
                        t = " Signature : %s(%s)\n"%(obj, ', '.join(v))
                        t += " Docstring :\n" + sage.misc.sageinspect.sage_getdoc(s).strip()
                        return t
                    result += eval('getdoc(O)', {'getdoc':f, 'O':O})
                except Exception, err:
                    result += "Unable to read docstring (%s)"%err
                result = result.lstrip().replace('\n ','\n') # Get rid of the 3 spaces in front of everything.
            elif get_source:
                import sage.misc.sageinspect
                result = get_file()
                try:
                    result += " Source:\n " + eval('getsource(O)', {'getsource':sage.misc.sageinspect.sage_getsource, 'O':O})
                except Exception, err:
                    result += "Unable to read source code (%s)"%err
            elif get_completions:
                if O is not None:
                    v = dir(O)
                    if hasattr(O, 'trait_names'):
                        v += O.trait_names()
                    if not target.startswith('_'):
                        # Hide private members unless the user typed a leading underscore.
                        v = [x for x in v if x and not x.startswith('_')]
                    j = len(target)
                    v = [x[j:] for x in v if x.startswith(target)]
                else:
                    v = []
        if get_completions:
            result = list(sorted(set(v), lambda x,y:cmp(x.lower(),y.lower())))
    except Exception, msg:
        # Introspection must never crash the caller: log and return empty.
        traceback.print_exc()
        result = []
        status = 'ok'
    else:
        status = 'ok'
    return {'result':result, 'target':target, 'expr':expr, 'status':status, 'get_help':get_help, 'get_completions':get_completions, 'get_source':get_source}
# Repo: andrzejmalota/StockPricePrediction
from src.utils.io import load, save
import pandas as pd
def save_targets(stock='amazon'):
    """
    Extract the Date/Close columns for one stock from the raw pickle and save
    them as the prediction targets.

    :param stock: key into the raw stock-data dict; generalizes the previously
                  hard-coded 'amazon' (default preserves old behavior and
                  output path, so existing callers are unaffected)
    """
    data = load('../../data/raw/stock_data.pickle')
    targets = pd.DataFrame(data[stock][['Date', 'Close']], columns=['Date', 'Close'])
    save(targets, '../../data/processed/targets_{}.pickle'.format(stock))


if __name__ == '__main__':
    save_targets()
| StarcoderdataPython |
import json
import pandas as pd
# Load the master table and emit a header-less copy of it.
db_df = pd.read_csv("database.csv")
db_df.to_csv("database_without_header.csv", header=False, index=False)
# Randomly split the rows into two disjoint halves (sample + drop by index).
# NOTE(review): no random_state is set, so the split differs on every run --
# confirm whether reproducibility is required here.
db_1_df = db_df.sample(frac=0.5)
db_2_df = db_df.drop(db_1_df.index)
db_1_df.to_csv("database1.csv")
db_2_df.to_csv("database2.csv")
# Round-trip the table through JSON records and pretty-print them to disk.
db_json = db_df.to_json(orient="records")
parsed_db_json = json.loads(db_json)
with open("database.json", "w") as db_json_file:
    json.dump(parsed_db_json, db_json_file, indent=4)
# Convert the employee JSON records to CSV (without the index column).
with open("EmployeeData.json") as employee_json_file:
    employee_df = pd.read_json(employee_json_file, orient="records")
    employee_df.to_csv("EmployeeData.csv", index=False)
| StarcoderdataPython |
import gym
from pgdrive import PGDriveEnv
def _a(env, action):
assert env.action_space.contains(action)
obs, reward, done, info = env.step(action)
assert env.observation_space.contains(obs)
assert isinstance(info, dict)
def _step(env):
    """Reset *env*, take five random sanity-checked steps, always close it."""
    try:
        initial_obs = env.reset()
        assert env.observation_space.contains(initial_obs)
        for _ in range(5):
            _a(env, env.action_space.sample())
    finally:
        env.close()
def test_naive_multi_agent_pgdrive():
    """Smoke-test PGDriveEnv in multi-agent mode with 10 spaced-out vehicles."""
    env = PGDriveEnv(
        config={
            "num_agents": 10,
            "target_vehicle_configs": {
                "agent{}".format(idx): {"born_longitude": idx * 4}
                for idx in range(10)
            },
        }
    )
    assert isinstance(env.action_space, gym.spaces.Dict)
    obs = env.reset()
    assert isinstance(obs, dict)
    action = env.action_space.sample()
    assert isinstance(action, dict)
    step_obs, rewards, dones, infos = env.step(action)
    # In multi-agent mode every step output is keyed per agent.
    for payload in (step_obs, rewards, dones, infos):
        assert isinstance(payload, dict)
    _step(env)


if __name__ == '__main__':
    test_naive_multi_agent_pgdrive()
| StarcoderdataPython |
# GitHub stars: 1-10
from project import (
calculator
)
def main():
    """Entry point: return the sum of 3 and 5 computed by the project calculator."""
    return calculator.add(3, 5)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6584335 | <gh_stars>1-10
# coding=utf-8
"""
This module contains config objects needed by paypal.interface.PayPalInterface.
Most of this is transparent to the end developer, as the PayPalConfig object
is instantiated by the PayPalInterface object.
"""
import logging
import os
from pprint import pformat
from paypal.compat import basestring
from paypal.exceptions import PayPalConfigError
logger = logging.getLogger('paypal.settings')


class PayPalConfig(object):
    """
    The PayPalConfig object is used to allow the developer to perform API
    queries with any number of different accounts or configurations. This
    is done by instantiating paypal.interface.PayPalInterface, passing config
    directives as keyword args.
    """
    # Used to validate correct values for certain config directives.
    _valid_ = {
        'API_ENVIRONMENT': ['SANDBOX', 'PRODUCTION'],
        'API_AUTHENTICATION_MODE': ['3TOKEN', 'CERTIFICATE'],
    }
    # Various API servers.
    _API_ENDPOINTS = {
        # In most cases, you want 3-Token. There's also Certificate-based
        # authentication, which uses different servers, but that's not
        # implemented.
        '3TOKEN': {
            'SANDBOX': 'https://api-3t.sandbox.paypal.com/nvp',
            'PRODUCTION': 'https://api-3t.paypal.com/nvp',
        }
    }
    _PAYPAL_URL_BASE = {
        'SANDBOX': 'https://www.sandbox.paypal.com/webscr',
        'PRODUCTION': 'https://www.paypal.com/webscr',
    }
    API_VERSION = '72.0'
    # Defaults. Used in the absence of user-specified values.
    API_ENVIRONMENT = 'SANDBOX'
    API_AUTHENTICATION_MODE = '3TOKEN'
    # 3TOKEN credentials, populated from kwargs in __init__.
    API_USERNAME = None
    # FIX: this line held a corrupted "<PASSWORD>" placeholder (a syntax
    # error); credentials correctly default to None until supplied.
    API_PASSWORD = None
    API_SIGNATURE = None
    # API Endpoints are just API server addresses.
    API_ENDPOINT = None
    PAYPAL_URL_BASE = None
    # API Endpoint CA certificate chain. If this is True, do a simple SSL
    # certificate check on the endpoint. If it's a full path, verify against
    # a private cert.
    # e.g. '/etc/ssl/certs/Verisign_Class_3_Public_Primary_Certification_Authority.pem'
    API_CA_CERTS = True
    # UNIPAY credentials
    UNIPAY_SUBJECT = None
    ACK_SUCCESS = "SUCCESS"
    ACK_SUCCESS_WITH_WARNING = "SUCCESSWITHWARNING"
    # In seconds. Depending on your setup, this may need to be higher.
    HTTP_TIMEOUT = 15.0

    def __init__(self, **kwargs):
        """
        PayPalConfig constructor. **kwargs catches all of the user-specified
        config directives at time of instantiation. It is fine to set these
        values post-instantiation, too.

        Some basic validation for a few values is performed below, and
        defaults are applied for certain directives in the absence of
        user-provided values.

        :raises PayPalConfigError: on an invalid environment, auth mode,
            CA-certs path, or missing 3TOKEN credentials
        """
        if kwargs.get('API_ENVIRONMENT'):
            api_environment = kwargs['API_ENVIRONMENT'].upper()
            # Make sure the environment is one of the acceptable values.
            if api_environment not in self._valid_['API_ENVIRONMENT']:
                raise PayPalConfigError('Invalid API_ENVIRONMENT')
            else:
                self.API_ENVIRONMENT = api_environment
        if kwargs.get('API_AUTHENTICATION_MODE'):
            auth_mode = kwargs['API_AUTHENTICATION_MODE'].upper()
            # Make sure the auth mode is one of the known/implemented methods.
            if auth_mode not in self._valid_['API_AUTHENTICATION_MODE']:
                choices = ", ".join(self._valid_['API_AUTHENTICATION_MODE'])
                raise PayPalConfigError(
                    "Not a supported auth mode. Use one of: %s" % choices
                )
            else:
                self.API_AUTHENTICATION_MODE = auth_mode
        # Set the API endpoints, which is a cheesy way of saying API servers.
        self.API_ENDPOINT = self._API_ENDPOINTS[self.API_AUTHENTICATION_MODE][self.API_ENVIRONMENT]
        self.PAYPAL_URL_BASE = self._PAYPAL_URL_BASE[self.API_ENVIRONMENT]
        # Set the CA_CERTS location. This can either be a None, a bool, or a
        # string path.
        if kwargs.get('API_CA_CERTS'):
            self.API_CA_CERTS = kwargs['API_CA_CERTS']
            if isinstance(self.API_CA_CERTS, basestring) and \
                    not os.path.exists(self.API_CA_CERTS):
                # A CA Cert path was specified, but it's invalid.
                raise PayPalConfigError('Invalid API_CA_CERTS')
        # set the 3TOKEN required fields
        if self.API_AUTHENTICATION_MODE == '3TOKEN':
            for arg in ('API_USERNAME', 'API_PASSWORD', 'API_SIGNATURE'):
                if arg not in kwargs:
                    raise PayPalConfigError('Missing in PayPalConfig: %s ' % arg)
                setattr(self, arg, kwargs[arg])
        for arg in ['HTTP_TIMEOUT']:
            if arg in kwargs:
                setattr(self, arg, kwargs[arg])
        # Lazy %-style args so pformat only runs when DEBUG logging is on.
        logger.debug(
            'PayPalConfig object instantiated with kwargs: %s', pformat(kwargs)
        )
| StarcoderdataPython |
# This file is a part of the HiRISE DTM Importer for Blender
#
# Copyright (C) 2017 Arizona Board of Regents on behalf of the Planetary Image
# Research Laboratory, Lunar and Planetary Laboratory at the University of
# Arizona.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""A HiRISE DTM Importer for Blender"""
# Add-on metadata consumed by Blender's add-on manager.
bl_info = {
    "name": "HiRISE DTM Importer",
    "author": "<NAME> (<EMAIL>)",
    "version": (0, 2, 2),
    "blender": (2, 78, 0),
    "location": "File > Import > HiRISE DTM (.img)",
    "description": "Import a HiRISE DTM as a mesh",
    "warning": "May consume a lot of memory",
    "wiki_url": "https://wiki.blender.org/index.php/Extensions:2.6/Py/"
                "Scripts/Import-Export/HiRISE_DTM_from_PDS_IMG",
    "category": "Import-Export",
}
# Support Blender's "Reload Scripts": if bpy is already bound, this module is
# being re-executed, so reload the submodules instead of re-importing them.
if "bpy" in locals():
    import importlib
    importlib.reload(importer)
    importlib.reload(terrainpanel)
else:
    from .ui import importer
    from .ui import terrainpanel
import bpy
def menu_import(self, context):
    """Append the HiRISE DTM import operator to the File > Import menu."""
    op = importer.ImportHiRISETerrain
    self.layout.operator(op.bl_idname, text=op.bl_label)
def register():
    """Register every class in this module with Blender and hook the
    importer into the File > Import menu.

    NOTE(review): ``bpy.utils.register_module`` is the pre-2.80 API while
    ``TOPBAR_MT_file_import`` only exists in Blender 2.80+ -- one of the two
    cannot work; confirm the targeted Blender version.
    """
    bpy.utils.register_module(__name__)
    bpy.types.TOPBAR_MT_file_import.append(menu_import)
def unregister():
    """Undo register(): unregister the module's classes and remove the
    File > Import menu entry."""
    bpy.utils.unregister_module(__name__)
    bpy.types.TOPBAR_MT_file_import.remove(menu_import)
# Allow running this file directly (e.g. from Blender's text editor).
if __name__ == '__main__':
    register()
| StarcoderdataPython |
3326221 | <reponame>vedant-jad99/GeeksForGeeks-DSA-Workshop-Complete-Codes<gh_stars>1-10
"""
Link to the question - https://leetcode.com/explore/featured/card/july-leetcoding-challenge-2021/610/week-3-july-15th-july-21st/3817/
"""
class Solution:
    def threeEqualParts(self, arr: List[int]) -> List[int]:
        """LeetCode 927 -- Three Equal Parts.

        Find indices ``i < j`` such that ``arr[0..i]``, ``arr[i+1..j-1]`` and
        ``arr[j..]`` represent the same binary value, returning ``[i, j]`` or
        ``[-1, -1]`` if no such split exists.

        Approach: each part must contain exactly one third of the array's
        ones; the bit pattern between the first and last one of each part
        must be identical, and the zeros trailing the final part are
        mandatory for all parts. Runs in O(n) time.

        Fix over the original: ``part`` is now computed with integer
        division (``//``) instead of true division, so the membership tests
        below compare int against int rather than relying on int==float
        coercion.
        """
        n_ones = sum(arr)
        # All zeros: any split represents the value 0.
        if n_ones == 0:
            return [0, len(arr) - 1]
        if n_ones % 3 != 0:
            # The ones cannot be distributed evenly over three parts.
            return [-1, -1]
        part = n_ones // 3  # ones per part (integer, not float)

        # Record the index of the first and the last one of each part
        # (6 indices in total; for part == 1 both tests fire on the same i).
        ones = 0
        boundaries = []
        for i, e in enumerate(arr):
            if e == 1:
                ones += 1
                if ones in (1, part + 1, 2 * part + 1):
                    boundaries.append(i)   # first one of a part
                if ones in (part, 2 * part, 3 * part):
                    boundaries.append(i)   # last one of a part
        p1, p2, q1, q2, r1, r2 = boundaries

        # The interior bit patterns of the three parts must match.
        if not (arr[p1:p2] == arr[q1:q2] == arr[r1:r2]):
            return [-1, -1]

        # Zeros between the parts; z trailing zeros of the last part are
        # mandatory, so both gaps must be able to absorb at least z zeros.
        x = q1 - p2 - 1
        y = r1 - q2 - 1
        z = len(arr) - r2 - 1
        if x < z or y < z:
            return [-1, -1]
        return [p2 + z, q2 + z + 1]
| StarcoderdataPython |
6615991 | """The rmvtransport component."""
| StarcoderdataPython |
4904895 | import asyncio
from typing import Type, Union, Callable, Optional
from ..handler import await_exec_target
from ..utils import search_event
from ..entities.event import TemplateEvent, ParamRet
from ..entities.auxiliary import BaseAuxiliary
class StepOut(BaseAuxiliary):
    """Auxiliary that waits for a single occurrence of a given event type.

    On construction it registers a "before_parse" judge that captures the
    parameters of the first matching event; ``wait()`` then awaits the
    result produced by running ``handler`` on those captured parameters.
    """
    # The event class this step-out is triggered by.
    event_type: Type[TemplateEvent]
    # Callable executed with the captured event parameters (set via __call__).
    handler: Callable
    # Parameters captured from the matching event by the judge below.
    target_args: ParamRet

    def __init__(self, event: Union[Type[TemplateEvent], str]):
        """Accept either an event class or an event name to resolve."""
        if isinstance(event, str):
            name = event
            event = search_event(event)
            if not event:
                raise Exception(name + " cannot found!")
        self.event_type = event
        # Created lazily by wait(); holds the handler's eventual result.
        self._future: Optional[asyncio.Future] = None
        # True once wait() has completed; a StepOut can be awaited only once.
        self._waited: bool = False
        super().__init__()

        @self.set_aux("before_parse", "judge")
        def judge(j_event: TemplateEvent) -> bool:
            # Exact type match (no subclasses); capture params on success.
            if type(j_event) is self.event_type:
                self.target_args = j_event.get_params()
                return True
            return False

    def __del__(self):
        # Cancel a still-pending future so awaiting tasks are released.
        if self._future:
            self._future.cancel()

    @staticmethod
    def handler(*args, **kwargs):
        # Default no-op handler; replaced via __call__ (decorator usage).
        pass

    def __call__(self, func: Callable):
        """Decorator usage: install *func* as the handler and return self."""
        setattr(self, 'handler', func)
        return self

    @property
    def future(self):
        # Read-only access to the underlying future (may be None).
        return self._future

    def done(self) -> bool:
        """Whether the trigger has already completed."""
        return self._waited or bool(self._future and self._future.done())

    async def make_done(self):
        """Run the handler on the captured event params and complete the
        future with its result (or exception).

        Returns True when the future was resolved, False otherwise.
        NOTE(review): assumes wait() has been called first -- self._future
        is still None otherwise and the .done() calls below would fail.
        """
        if self.done():
            return False
        try:
            result = await await_exec_target(
                self.handler,
                self.target_args
            )
            # A None result means "not handled"; keep waiting in that case.
            if result is not None and not self._future.done():
                self._future.set_result(result)
                return True
        except Exception as e:
            if not self._future.done():
                self._future.set_exception(e)
            return True
        return False

    async def wait(self, timeout: float = 0.):
        """Await the handler result; returns None on timeout.

        timeout <= 0 means wait indefinitely. May only be awaited once.
        """
        if self._waited:
            raise RuntimeError('已被等待。')
        if self._future is None:
            self._future = asyncio.get_running_loop().create_future()
        try:
            if timeout > 0:
                return await asyncio.wait_for(self._future, timeout)
            else:
                return await self._future
        except asyncio.TimeoutError:
            return None
        finally:
            # Mark as consumed only if the future actually completed.
            self._waited = self._future.done()
| StarcoderdataPython |
1745148 | <filename>engine/world/world_2d.py
from engine.collision import CollisionCache, PositionalCollisionCache
from engine.collision import resolve_physical_collision
from engine.event_dispatcher import EventDispatcher
from engine.geometry import detect_overlap_2d
from .world_object import WorldObject, COLLIDER, TRIGGER
class World2d(EventDispatcher):
    """Detects overlap and resolves collisions between game objects.

    This is implemented as a sweep-and-prune algorithm over all registered
    objects. Objects can be registered as:

    * Triggers: Dispatches an on_object_enter event when this object overlaps
      with another, and an on_object_leave event when they no longer overlap.
    * Colliders: Resolves collisions between two colliders using the
      :mod:`collision` module. An on_collision event is dispatched to both
      objects upon resolution.

    Events:
        on_update_enter: A world update has just begun.
            The world will be passed to the listeners.
        on_update_exit: A world update has just completed.
            The world will be passed to the listeners.
        on_collider_add: A collider was added to the world.
            The collider will be passed to the listeners.
        on_trigger_add: A trigger was added to the world.
            The trigger will be passed to the listeners.
    """

    def __init__(self):
        """Creates an empty world with two dimensional physics."""
        super(World2d, self).__init__()
        self._colliders = PositionalCollisionCache()
        self._triggers = CollisionCache()
        self._objects = []

        self.register_event_type('on_update_enter')
        self.register_event_type('on_update_exit')
        self.register_event_type('on_collider_add')
        self.register_event_type('on_trigger_add')

    def add_collider(self, physical_object):
        """Adds a game object to be treated as a collider.

        Args:
            physical_object (:obj:`engine.game_object.PhysicalObject`):
                The game object to resolve collisions against.
        """
        self._objects.append(WorldObject(physical_object, COLLIDER))
        self.dispatch_event('on_collider_add', physical_object)

    def add_trigger(self, physical_object):
        """Adds a game object to be treated as a trigger area.

        Args:
            physical_object (:obj:`engine.game_object.GameObject`):
                The game object to detect collisions with.
        """
        self._objects.append(WorldObject(physical_object, TRIGGER))
        self.dispatch_event('on_trigger_add', physical_object)

    def update(self, ms):
        """Updates the state of the world by processing object collisions.

        Args:
            ms (int): The time since last update, in milliseconds.
        """
        self.dispatch_event('on_update_enter', self)

        # Broad phase: sort objects by their x coordinate.
        self._objects.sort(key=lambda world_object: world_object.object.x)

        sweep_list = []
        for obj in self._objects:
            # Run the narrow phase against every object whose x-extent still
            # reaches the current object; drop objects we have swept past.
            # (An explicit loop is used here because _narrow_phase is run for
            # its side effects; the previous list comprehension obscured that.)
            remaining = []
            for swept in sweep_list:
                if self._is_swept_past(swept, obj):
                    continue
                self._narrow_phase(swept, obj)
                remaining.append(swept)
            # Add the current object to the sweep list.
            remaining.append(obj)
            sweep_list = remaining

        # Update the colliders and triggers.
        self._colliders.update(ms)
        self._triggers.update(ms)

        # Notify of objects entering and leaving collisions.
        self._dispatch(
            'on_collider_enter', self._colliders.get_new_collisions())
        self._dispatch(
            'on_collider_exit', self._colliders.get_removed_collisions())
        self._dispatch(
            'on_trigger_enter', self._triggers.get_new_collisions())
        self._dispatch(
            'on_trigger_exit', self._triggers.get_removed_collisions())

        self.dispatch_event('on_update_exit', self)

    def _narrow_phase(self, first, second):
        """Detects and processes a collision between two game objects.

        If both objects are colliders, the lighter object will be moved.

        Args:
            first (:obj:`world_object.WorldObject`):
                The first potential collision object.
            second (:obj:`world_object.WorldObject`):
                The second potential collision object.

        Returns:
            The first object passed into the narrow phase.
        """
        # Process as a collider collision if neither object is a trigger.
        if TRIGGER not in (first.type, second.type):
            self._resolve_colliders(first.object, second.object)
        else:
            self._resolve_triggers(first.object, second.object)
        return first

    def _resolve_colliders(self, first, second):
        """Resolves a collision between colliders.

        Args:
            first (:obj:`game_object.GameObject`):
                The first collider in the collision.
            second (:obj:`game_object.GameObject`):
                The second collider in the collision.
        """
        velocity_delta = resolve_physical_collision(first, second)
        self._colliders.add_collision(first, second, velocity_delta)

    def _resolve_triggers(self, first, second):
        """Resolves a collision between one trigger and any other object.

        Args:
            first (:obj:`game_object.GameObject`):
                The first trigger or collider in the collision.
            second (:obj:`game_object.GameObject`):
                The second trigger or collider in the collision.
        """
        if detect_overlap_2d(first, second):
            self._triggers.add_collision(first, second)

    def _dispatch(self, event, collisions):
        """Dispatches a collision event to all collisions.

        Args:
            event (str): Name of the event to fire.
            collisions (list of tuple of :obj:`game_object.GameObject`):
                A list with pairs of objects which have collided.
        """
        for first, second in collisions:
            first.dispatch_event(event, second)
            second.dispatch_event(event, first)

    def _is_swept_past(self, old_entry, new_entry):
        """Determines if the new entry has swept past the prior entry.

        Args:
            old_entry (:obj:`world_object.WorldObject`):
                The prior entry in the sweep list.
            new_entry (:obj:`world_object.WorldObject`):
                The new entry into the sweep list.

        Returns:
            True if the prior entry has been swept past, false otherwise.
        """
        prior_endpoint = old_entry.object.x + old_entry.object.width
        return prior_endpoint <= new_entry.object.x
| StarcoderdataPython |
3227406 | # Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
"""This module defines action enum for flagging what should be follow up action of a `PaymentCommand`
See `diem.offchain.payment_command.PaymentCommand.follow_up_action` for more details.
"""
from enum import Enum
class Action(Enum):
    """Action for following up a PaymentCommand

    List of actions for different PaymentCommand status.
    """

    # Off-chain KYC data exchange follow-ups.
    EVALUATE_KYC_DATA = "evaluate_kyc_data"
    REVIEW_KYC_DATA = "review_kyc_data"
    CLEAR_SOFT_MATCH = "clear_soft_match"
    # The command is ready: submit the transaction on-chain.
    SUBMIT_TXN = "submit_transaction"
| StarcoderdataPython |
1673371 | <filename>moog_demos/gif_writer.py
"""Gif writer to record a video while playing the demo.
Note: If the enter key prints `^M` instead of entering the input, run the
following command:
$ stty sane
"""
import imageio
import logging
import numpy as np
import os
import sys
class GifWriter(object):
    """Accumulates image frames and writes them out as an animated gif.

    Usage:
        writer = GifWriter('path/to/a/file.gif')
        for frame in frames:
            writer.add(frame)
        writer.close()
    """

    def __init__(self, gif_file, fps=5):
        """Constructor.

        Args:
            gif_file: String. Full path to gif filename. Should end in '.gif'.
            fps: Int. Frames per second for the gif.
        """
        gif_file = os.path.expanduser(gif_file)
        self._prepare_directory(os.path.dirname(gif_file))
        self._handle_existing_file(gif_file)

        self._gif_file = gif_file
        self._images = []
        self._fps = fps

    @staticmethod
    def _prepare_directory(gif_dir):
        """Interactively create the target directory if it is missing,
        exiting the process if the user declines."""
        if os.path.exists(gif_dir):
            return
        print('Directory {} does not exist'.format(gif_dir))
        should_create = input(
            'Would you like to create that directory? (y/n)')
        if should_create == 'y':
            print('Creating directory {}'.format(gif_dir))
            os.makedirs(gif_dir)
        else:
            print('exiting')
            sys.exit()

    @staticmethod
    def _handle_existing_file(gif_file):
        """Interactively remove a pre-existing gif file, exiting the
        process if the user declines to override it."""
        if not os.path.isfile(gif_file):
            return
        print('File {} to write gif to already exists.'.format(gif_file))
        should_override = input(
            'Would you like to override the file there? (y/n)')
        if should_override == 'y':
            print('Removing {}'.format(gif_file))
            os.remove(gif_file)
        else:
            print('exiting')
            sys.exit()

    def add(self, image):
        """Append one frame to the pending gif."""
        self._images.append(image)

    def close(self):
        """Write the gif."""
        print('Writing gif with {} images to file {}'.format(
            len(self._images), self._gif_file))
        imageio.mimsave(self._gif_file, self._images, fps=self._fps)
| StarcoderdataPython |
6584122 | <filename>python/leet_code/largest_continous_subarray.py
'''
Given an array of integers nums and an integer limit, return the size of the longest continuous subarray such that the absolute difference between any two elements is less than or equal to limit.
In case there is no subarray satisfying the given condition return 0.
Example 1:
Input: nums = [8,2,4,7], limit = 4
Output: 2
Example 2:
Input: nums = [10,1,2,4,7,2], limit = 5
Output: 4
Explanation: The subarray [2,4,7,2] is the longest since the maximum absolute diff is |2-7| = 5 <= 5.
Input: nums = [4,2,2,2,4,4,2,2], limit = 0
Output: 3
Constraints:
1 <= nums.length <= 10^5
1 <= nums[i] <= 10^9
0 <= limit <= 10^9
Observation:
If difference between max and min elements in an array is less than or equal to limit, difference between any two numbers in the array cannot exceed that limit
Solution Approach:
1. For a sub array which meets problem constraints, keep a track of the maximum and minimum element of the sub array
2. When a new element is added to the array, constraints would be broken if absolute difference between this element and minElem/maxElem is greater than limit
1. If a constraint is broken because of minimum element, extract it. Afterwards, keep on popping from min and max heaps till the time we have the index that is one greater than extracted minimum element
2. If a constraint is broken because of maximum element, extract it. Afterwards, keep on popping from min and max heaps till the time we have the index that is one greater than extracted maximum element
Above approach did work but revised modification results in cleaner code
1. Traverse the array
2. Maintain two heaps maxpq and minpq
3. Iterate the array
4. Add tuple contained element value and index in maxpq and minpq
5. While the difference between max element and min element is greater than limit repeat below steps
1. Get the minimum index+1 of the minimum and maximum elements.
2. Delete the max and min elements from maxpq and minpq respectively repeatedly till the time they are in the range from minimum index and current element
6. Calculate the new size of the complaint subarray, if applicable
'''
import heapq
def longestSubarray(nums, limit):
    """Return the length of the longest contiguous subarray of *nums* whose
    max/min difference (and hence the absolute difference between any two
    elements) is at most *limit*.

    Implementation: sliding window with two monotonic deques -- one
    decreasing (front holds the window maximum) and one increasing (front
    holds the window minimum). Every element is appended and popped at most
    once, so this runs in O(n) time, improving on the original heap-based
    version which was O(n log n) and retained stale heap entries.
    """
    from collections import deque

    max_q = deque()  # decreasing values; max_q[0] is the window maximum
    min_q = deque()  # increasing values; min_q[0] is the window minimum
    start = 0        # left edge of the current compliant window
    best = 0
    for i, num in enumerate(nums):
        # Restore monotonicity before appending the new element.
        while max_q and max_q[-1] < num:
            max_q.pop()
        max_q.append(num)
        while min_q and min_q[-1] > num:
            min_q.pop()
        min_q.append(num)
        # Shrink from the left until the window satisfies the limit again.
        while max_q[0] - min_q[0] > limit:
            if nums[start] == max_q[0]:
                max_q.popleft()
            if nums[start] == min_q[0]:
                min_q.popleft()
            start += 1
        best = max(best, i - start + 1)
    return best
def test():
    """Smoke-test longestSubarray against the problem-statement examples."""
    assert longestSubarray([8,2,4,7], 4) == 2
    assert longestSubarray([10,1,2,4,7,2], 5) == 4
    assert longestSubarray([4,2,2,2,4,4,2,2], 0) == 3

# Run the checks whenever this script is executed/imported.
test()
| StarcoderdataPython |
375302 | <reponame>TomWerner/AlumniMentoring<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-16 23:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (do not hand-edit the operations):
    makes the facebook/linkedin/personal URL fields optional (blank/null)
    on both mentee and mentor contact information models."""

    dependencies = [
        ('mentoring', '0002_auto_20161016_1817'),
    ]

    operations = [
        migrations.AlterField(
            model_name='menteecontactinformation',
            name='facebook_url',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='menteecontactinformation',
            name='linkedin_url',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='menteecontactinformation',
            name='personal_url',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='mentorcontactinformation',
            name='facebook_url',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='mentorcontactinformation',
            name='linkedin_url',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='mentorcontactinformation',
            name='personal_url',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
| StarcoderdataPython |
5068903 | from mstr.requests import AuthenticatedMSTRRESTSession
from mstr.requests import MSTRRESTSession
from mstr.requests.rest import exceptions
import pytest
@pytest.fixture(scope="function")
def session():
    """A fresh, unauthenticated REST session against the public demo API."""
    return MSTRRESTSession(
        base_url="https://demo.microstrategy.com/MicroStrategyLibrary/api/"
    )
@pytest.fixture(scope="function")
def logged_in_session():
    """A logged-in REST session; logs out again during fixture teardown."""
    session = MSTRRESTSession(
        base_url="https://demo.microstrategy.com/MicroStrategyLibrary/api/"
    )
    session.login()
    assert session.has_session() is True
    yield session
    # Teardown: release the server-side session.
    session.logout()
def test_login(session):
    """login() establishes a session token and logout() discards it."""
    assert session.has_session() is False
    session.login()
    assert session.has_session() is True
    session.logout()
    assert session.has_session() is False
def test_prolong_session(logged_in_session):
    """PUT /sessions extends an active session (204 No Content)."""
    response = logged_in_session.put_sessions()
    assert response.status_code == 204
def test_get_session_status(logged_in_session):
    """GET /sessions succeeds for an active session (200 OK)."""
    response = logged_in_session.get_sessions()
    assert response.status_code == 200
def test_get_session_failure(session):
    """Querying the session status after logout raises SessionException."""
    session.login()
    assert session.has_session() is True
    session.logout()
    with pytest.raises(exceptions.SessionException):
        session.get_sessions()
def test_remote_session_issue(logged_in_session):
    """A corrupted auth token makes the server reject the request."""
    logged_in_session.headers.update({"x-mstr-authtoken": "You're my wife now"})
    with pytest.raises(exceptions.MSTRException):
        logged_in_session.get_sessions()
def test_context_manager():
    """The authenticated session context manager logs in on entry."""
    with AuthenticatedMSTRRESTSession(
        base_url="https://demo.microstrategy.com/MicroStrategyLibrary/api/"
    ) as session:
        assert session.has_session() is True
| StarcoderdataPython |
3584794 | #!/usr/bin/env python3
# The MIT License
# Copyright (c) 2016 Estonian Information System Authority (RIA), Population Register Centre (VRK)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# End-to-end system test for verifying the behaviour of the operational
# monitoring daemon.
# Expecting Python 3.4 under Ubuntu 14.04
# Additional dependencies:
# sudo apt-get install python3-requests
# sudo apt-get install python3-pip
# sudo pip3 install typing
# Expecting key-based SSH access to the security servers and passwordless sudo
# inside the security servers.
# Use ~/.ssh/config for custom usernames and key paths.
# Alternatively, an SSH user can be supplied with the --ssh-user command line
# argument if the same user is suitable for running remote commands on all
# the servers.
import sys
import copy
import time
import socket
import argparse
import unittest
import traceback
import subprocess
from contextlib import contextmanager
from typing import Tuple
sys.path.append('.')
from testcases import *
sys.path.append('..')
import python_common as common
CLIENT_SECURITY_SERVER_ADDRESS = None
PRODUCER_SECURITY_SERVER_ADDRESS = None
SSH_USER = None
SERVICE_RESTART_SLEEP_SECONDS = 2 # can be overridden on the command line
REQUEST_TEMPLATE_DIR = "templates"
LOCAL_INI_PATH = "/etc/xroad/conf.d/local.ini"
LOCAL_INI_PARAMETERS = {
# The default offset is a minute to support heavy loads, but we want to be able to
# get the operational data records pertaining to our tests without waiting
# too long.
"op-monitor.records-available-timestamp-offset-seconds": 0,
# Make sure all the records are returned unless we reconfigure the daemon
# explicitly.
"op-monitor.max-records-in-payload": 100,
}
class OperationalMonitoringIntegrationTest(unittest.TestCase):
    """End-to-end tests of the operational monitoring daemon.

    Each test method delegates to _run_test with a testcase module (imported
    via ``from testcases import *``) and the ini parameters to apply to the
    target security servers for the duration of the test.
    """

    def test_simple_store_and_query(self):
        self._run_test(test_simple_store_and_query, LOCAL_INI_PARAMETERS)

    def test_soap_fault(self):
        self._run_test(test_soap_fault, LOCAL_INI_PARAMETERS)

    def test_get_metadata(self):
        self._run_test(test_get_metadata, LOCAL_INI_PARAMETERS)

    def test_metaservices(self):
        self._run_test(test_metaservices, LOCAL_INI_PARAMETERS)

    def test_attachments(self):
        self._run_test(test_attachments, LOCAL_INI_PARAMETERS)

    def test_health_data(self):
        ini_parameters = copy.deepcopy(LOCAL_INI_PARAMETERS)
        # Let the health statistics period be reset in a reasonable period in
        # the context of the tests so we can check that the values are reset,
        # too.
        ini_parameters["op-monitor.health-statistics-period-seconds"] = 10
        self._run_test(test_health_data, ini_parameters)

    def test_limited_operational_data_response(self):
        ini_parameters = copy.deepcopy(LOCAL_INI_PARAMETERS)
        # This is to check if sending operational data in multiple batches works
        # as expected.
        ini_parameters["op-monitor.max-records-in-payload"] = 2
        ini_parameters["op-monitor.records-available-timestamp-offset-seconds"] = 2
        self._run_test(test_limited_operational_data_response, ini_parameters)

    def test_service_cluster(self):
        self._run_test(test_service_cluster, LOCAL_INI_PARAMETERS)

    def test_outputspec(self):
        self._run_test(test_outputspec, LOCAL_INI_PARAMETERS)

    def test_time_interval(self):
        self._run_test(test_time_interval, LOCAL_INI_PARAMETERS)

    def test_client_filter(self):
        self._run_test(test_client_filter, LOCAL_INI_PARAMETERS)

    def test_zero_buffer_size(self):
        ini_parameters = copy.deepcopy(LOCAL_INI_PARAMETERS)
        # This is to check that setting operational monitoring buffer size
        # to zero results with operational data not being sent to the
        # operational monitoring daemon.
        ini_parameters["op-monitor-buffer.size"] = 0
        self._run_test(test_zero_buffer_size, ini_parameters)

    def _run_test(self, testcase_module_name, ini_parameters):
        """Apply *ini_parameters* to both servers, run the testcase, restore.

        Despite its name, ``testcase_module_name`` is the testcase module
        object itself -- its ``run`` attribute is looked up and called below.
        Exits the whole process with status 1 on failure (after the context
        manager has restored the server configuration).
        """
        # Wait before starting the test case to avoid getting operational monitoring data
        # of previous testcases in the result of the operational data requests made in
        # this test.
        time.sleep(1)

        exit_status = 0
        with configure_and_restart_opmonitor(
                (CLIENT_SECURITY_SERVER_ADDRESS, PRODUCER_SECURITY_SERVER_ADDRESS),
                SSH_USER, ini_parameters):
            try:
                getattr(testcase_module_name, "run")(
                    CLIENT_SECURITY_SERVER_ADDRESS, PRODUCER_SECURITY_SERVER_ADDRESS,
                    SSH_USER, REQUEST_TEMPLATE_DIR)
            except Exception as e:
                print("An exception occurred: %s" % (e, ))
                traceback.print_exc()
                exit_status = 1

        # Let the context manager restore the configuration and restart the
        # servers before we exit.
        if exit_status != 0:
            sys.exit(1)
def _get_initial_ini_parameters(
        server_addresses: Tuple, ssh_user: str, target_ini_parameter_keys: Tuple):
    """Collect the current values of the target ini parameters from each server.

    Returns a dict keyed by (parameter_key, server_address) tuples; the value
    is the parameter's current string value, or None when the parameter is
    absent on that server. Exits the process on any ssh/crudini failure other
    than a missing parameter.
    """
    initial_parameters = dict()
    for param_key in target_ini_parameter_keys:
        # Keys are of the form "<ini-section>.<parameter-name>".
        key_parts = param_key.split('.')
        command = "sudo crudini --get %s %s %s" % (
            LOCAL_INI_PATH, key_parts[0], key_parts[1], )
        for server_address in server_addresses:
            user_and_server = common.generate_user_and_server(server_address, ssh_user)
            cache_key = (param_key, server_address)
            try:
                raw_value = subprocess.check_output(["ssh", user_and_server, command, ])
                initial_parameters[cache_key] = raw_value.strip().decode('utf-8')
            except subprocess.CalledProcessError as e:
                # crudini exits with 1 when the section/parameter is missing;
                # record that as None and carry on with the next server.
                if e.returncode == 1:
                    print("No existing value was found for the parameter '%s' " \
                            "in security server %s \n" % (param_key, server_address))
                    initial_parameters[cache_key] = None
                    continue
                print(e)
                sys.exit(1)

    return initial_parameters
def _configure_ini_parameters(
        server_addresses: Tuple, target_ini_parameters: dict,
        initial_parameters: dict, ssh_user: str, mode: str):
    """ Helper for reconfiguring the servers before and after the tests.
    Use mode="edit" for initial editing and mode="restore" for restoring
    the configuration.
    """
    service_restarted = False
    activity = None
    if mode == "restore":
        activity = "Restoring"
    elif mode == "edit":
        activity = "Editing"
    else:
        raise Exception("Programming error: only edit and restore modes are available")

    for server_address in server_addresses:
        opmonitor_needs_restart = False
        proxy_needs_restart = False
        for param, value in target_ini_parameters.items():
            initial_value = initial_parameters[(param, server_address)]
            # Only touch parameters whose target value differs from the
            # value currently on the server (string comparison).
            if str(target_ini_parameters[param]) != str(initial_value):
                param_parts = param.split('.')
                ini_section = param_parts[0]
                parameter = param_parts[1]
                # NOTE(review): a parameter from any section other than
                # op-monitor/op-monitor-buffer would leave `target` unbound
                # below -- confirm only those two sections are ever used.
                if ini_section == "op-monitor":
                    opmonitor_needs_restart = True
                    target = "daemon"
                elif ini_section == "op-monitor-buffer":
                    proxy_needs_restart = True
                    target = "buffer"
                print("\n%s the configuration parameters of the " \
                        "operational monitoring %s in security server %s" %
                        (activity, target, server_address))
                command = None
                # Restoring a parameter that did not exist initially means
                # deleting it; everything else is a plain set.
                if mode == "restore" and initial_value is None:
                    print("Removing '%s'" % (param, ))
                    command = "sudo crudini --del %s %s %s" % (
                        LOCAL_INI_PATH, ini_section, parameter, )
                else:
                    value_to_set = value
                    if mode == "restore":
                        value_to_set = initial_value
                    print("Setting '%s' to '%s'" % (param, value_to_set,))
                    command = "sudo crudini --set %s %s %s %s" % (
                        LOCAL_INI_PATH, ini_section, parameter, value_to_set, )
                user_and_server = common.generate_user_and_server(
                    server_address, ssh_user)
                try:
                    subprocess.check_call(["ssh", user_and_server, command, ])
                except subprocess.CalledProcessError as e:
                    print(e)
                    # XXX This will leave the configuration files in an undefined state.
                    sys.exit(1)

        # Restart only the services whose configuration actually changed.
        if opmonitor_needs_restart:
            common.restart_service(server_address, "opmonitor", ssh_user)
            service_restarted = True
        if proxy_needs_restart:
            common.restart_service(server_address, "proxy", ssh_user)
            service_restarted = True

    # In case any services were restarted, wait a bit so the requests to the
    # operational monitoring daemon succeed again.
    if service_restarted:
        print("Sleeping for %d seconds after the restart of services" % (
            SERVICE_RESTART_SLEEP_SECONDS, ))
        time.sleep(SERVICE_RESTART_SLEEP_SECONDS)
@contextmanager
def configure_and_restart_opmonitor(
        server_addresses: Tuple, ssh_user: str, local_ini_parameters: dict):
    """ A wrapper to be used with the with-keyword upon specific configuration needs.
    Before the wrapped test is run, the configuration of the operational monitoring
    daemon and/or the proxy is changed and the services are restarted in all the
    given servers, if necessary.
    After the test has been run, the initial parameters (if any were found) are
    restored, otherwise they are removed, and the services are restarted again, if any
    initial changes were made.
    NOTE: When using this function with the with-keyword, DO NOT call sys.exit before
    you are done with the with-block.
    """
    # First put aside the original parameters that we are about to change.
    initial_parameters = _get_initial_ini_parameters(
        server_addresses, ssh_user, local_ini_parameters.keys())

    # Now edit the parameters if necessary.
    _configure_ini_parameters(
        server_addresses, local_ini_parameters, initial_parameters, ssh_user, "edit")

    # Pass control to the caller for running the test case.
    yield

    # Now restore the parameters if necessary.
    # (Not run if the with-block raises -- a generator-based context manager
    # only resumes past `yield` on normal exit.)
    _configure_ini_parameters(
        server_addresses, local_ini_parameters, initial_parameters, ssh_user, "restore")
def _resolve_address(name_or_address):
""" Resolve the name or address given by the user to a valid IPv4 address. """
try:
addr_info = socket.getaddrinfo(
name_or_address, port=80, family=socket.AddressFamily.AF_INET)
# The function returns a list of 5-tuples with the address as a 2-tuple
return addr_info[0][4][0]
except Exception as e:
print("The server name '%s' cannot be resolved" % (name_or_address, ))
raise
if __name__ == '__main__':
    # Parse our own command line options, then hand the remainder to unittest.
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--client-security-server", required=True,
                           dest="client_security_server")
    argparser.add_argument("--producer-security-server", required=True,
                           dest="producer_security_server")
    argparser.add_argument("--ssh-user", required=False,
                           dest="ssh_user")
    argparser.add_argument("--service-restart-sleep-seconds",
                           type=int, dest="service_restart_sleep_seconds")
    # Capture the rest of the arguments for passing to unittest.
    argparser.add_argument('unittest_args', nargs='*')
    args = argparser.parse_args()

    # Resolve the user-supplied names into IPv4 addresses once, up front.
    CLIENT_SECURITY_SERVER_ADDRESS = _resolve_address(args.client_security_server)
    PRODUCER_SECURITY_SERVER_ADDRESS = _resolve_address(args.producer_security_server)
    SSH_USER = args.ssh_user
    if args.service_restart_sleep_seconds:
        SERVICE_RESTART_SLEEP_SECONDS = args.service_restart_sleep_seconds

    # Pass the rest of the command line arguments to unittest.
    unittest_args = [sys.argv[0]] + args.unittest_args
    unittest.main(argv=unittest_args, verbosity=2)
| StarcoderdataPython |
3485379 | <reponame>adamjakab/BeetsPluginGoingRunning
# Copyright: Copyright (c) 2020., <NAME>
# Author: <NAME> <adam at jakab dot pro>
# License: See LICENSE.txt
import os
from beets import mediafile
from beets.dbcore import types
from beets.plugins import BeetsPlugin
from beets.util.confit import ConfigSource, load_yaml
from beetsplug.goingrunning.command import GoingRunningCommand
class GoingRunningPlugin(BeetsPlugin):
    """Beets plugin entry point for the `goingrunning` command."""

    # YAML file (shipped next to this module) holding the default config.
    _default_plugin_config_file_name_ = 'config_default.yml'

    def __init__(self):
        """Load the bundled default config and register `play_count` support."""
        super(GoingRunningPlugin, self).__init__()

        # Read default configuration
        config_file_path = os.path.join(os.path.dirname(__file__),
                                        self._default_plugin_config_file_name_)
        source = ConfigSource(load_yaml(config_file_path) or {},
                              config_file_path)
        self.config.add(source)

        # Add `play_count` field support
        fld_name = u'play_count'
        # Guard against double registration if another plugin already added it.
        if fld_name not in mediafile.MediaFile.fields():
            field = mediafile.MediaField(
                mediafile.MP3DescStorageStyle(fld_name),
                mediafile.StorageStyle(fld_name),
                out_type=int
            )
            self.add_media_field(fld_name, field)

    def commands(self):
        """Expose the goingrunning CLI command to beets."""
        return [GoingRunningCommand(self.config)]

    @property
    def item_types(self):
        """Declare the database type of the flexible `play_count` field."""
        return {'play_count': types.INTEGER}
| StarcoderdataPython |
1655173 | <filename>PythonExtensions/Setup/Classifiers.py
class Development_Status(object):
    """PyPI trove classifier strings for the "Development Status" category,
    for use in setup() `classifiers` lists."""
    Planning = "Development Status :: 1 - Planning"
    PreAlpha = "Development Status :: 2 - Pre-Alpha"
    Alpha = "Development Status :: 3 - Alpha"
    Beta = "Development Status :: 4 - Beta"
    Production_Stable = "Development Status :: 5 - Production/Stable"
    Mature = "Development Status :: 6 - Mature"
    Inactive = "Development Status :: 7 - Inactive"
class Environment(object):
    """PyPI trove classifier strings for the "Environment" category; nested
    classes mirror the sub-categories of the classifier hierarchy."""
    class Console(object):
        Console = "Environment :: Console"
        Curses = "Environment :: Console :: Curses"
        Framebuffer = "Environment :: Console :: Framebuffer"
        Newt = "Environment :: Console :: Newt"
        svgalib = "Environment :: Console :: svgalib"

    class GPU(object):
        GPU = "Environment :: GPU"

        class NVIDIA_CUDA(object):
            # NOTE(review): member names do not always match the version in
            # the string (e.g. Three -> "2.3") -- looks auto-generated;
            # verify the mapping before relying on attribute names.
            NVIDIA_CUDA = "Environment :: GPU :: NVIDIA CUDA"
            Zero = "Environment :: GPU :: NVIDIA CUDA :: 9.0"
            One = "Environment :: GPU :: NVIDIA CUDA :: 9.1"
            Two = "Environment :: GPU :: NVIDIA CUDA :: 9.2"
            Three = "Environment :: GPU :: NVIDIA CUDA :: 2.3"
            Five = "Environment :: GPU :: NVIDIA CUDA :: 7.5"

    Handhelds_PDAs = "Environment :: Handhelds/PDA's"

    class MacOS_X(object):
        MacOS_X = "Environment :: MacOS X"
        Aqua = "Environment :: MacOS X :: Aqua"
        Carbon = "Environment :: MacOS X :: Carbon"
        Cocoa = "Environment :: MacOS X :: Cocoa"

    No_Input_Output_Daemon = "Environment :: No Input/Output (Daemon)"
    OpenStack = "Environment :: OpenStack"
    Other_Environment = "Environment :: Other Environment"
    Plugins = "Environment :: Plugins"

    class Web_Environment(object):
        Web_Environment = "Environment :: Web Environment"
        Buffet = "Environment :: Web Environment :: Buffet"
        Mozilla = "Environment :: Web Environment :: Mozilla"
        ToscaWidgets = "Environment :: Web Environment :: ToscaWidgets"

    Win32_MS_Windows = "Environment :: Win32 (MS Windows)"

    class X11_Applications(object):
        X11_Applications = "Environment :: X11 Applications"
        GTK = "Environment :: X11 Applications :: GTK"
        Gnome = "Environment :: X11 Applications :: Gnome"
        KDE = "Environment :: X11 Applications :: KDE"
        Qt = "Environment :: X11 Applications :: Qt"
class Framework(object):
    """PyPI trove classifiers for the "Framework" category.

    Nested classes mirror the ``::`` hierarchy.  Framework version
    constants are named after the final digit of the version string
    (e.g. ``Django.One`` -> "3.1", ``Plone.Two`` -> "5.2"), a quirk of
    the code generator that produced this module.
    """
    class AWS_CDK(object):
        AWS_CDK = "Framework :: AWS CDK"
        One = "Framework :: AWS CDK :: 1"
    AiiDA = "Framework :: AiiDA"
    AsyncIO = "Framework :: AsyncIO"
    BEAT = "Framework :: BEAT"
    BFG = "Framework :: BFG"
    Bob = "Framework :: Bob"
    Bottle = "Framework :: Bottle"
    class Buildout(object):
        Buildout = "Framework :: Buildout"
        Extension = "Framework :: Buildout :: Extension"
        Recipe = "Framework :: Buildout :: Recipe"
    class CastleCMS(object):
        CastleCMS = "Framework :: CastleCMS"
        Theme = "Framework :: CastleCMS :: Theme"
    Chandler = "Framework :: Chandler"
    CherryPy = "Framework :: CherryPy"
    CubicWeb = "Framework :: CubicWeb"
    Dash = "Framework :: Dash"
    class Django(object):
        Django = "Framework :: Django"
        Ten = "Framework :: Django :: 1.10"
        Eleven = "Framework :: Django :: 1.11"
        Four = "Framework :: Django :: 1.4"
        Five = "Framework :: Django :: 1.5"
        Six = "Framework :: Django :: 1.6"
        Seven = "Framework :: Django :: 1.7"
        Eight = "Framework :: Django :: 1.8"
        Nine = "Framework :: Django :: 1.9"
        Zero = "Framework :: Django :: 3.0"
        One = "Framework :: Django :: 3.1"
        Two = "Framework :: Django :: 3.2"
    class Django_CMS(object):
        Django_CMS = "Framework :: Django CMS"
        Four = "Framework :: Django CMS :: 3.4"
        Five = "Framework :: Django CMS :: 3.5"
        Six = "Framework :: Django CMS :: 3.6"
        Seven = "Framework :: Django CMS :: 3.7"
        Eight = "Framework :: Django CMS :: 3.8"
    Flake8 = "Framework :: Flake8"
    Flask = "Framework :: Flask"
    Hypothesis = "Framework :: Hypothesis"
    IDLE = "Framework :: IDLE"
    IPython = "Framework :: IPython"
    Jupyter = "Framework :: Jupyter"
    Kedro = "Framework :: Kedro"
    Lektor = "Framework :: Lektor"
    Masonite = "Framework :: Masonite"
    Matplotlib = "Framework :: Matplotlib"
    Nengo = "Framework :: Nengo"
    Odoo = "Framework :: Odoo"
    Opps = "Framework :: Opps"
    Paste = "Framework :: Paste"
    class Pelican(object):
        Pelican = "Framework :: Pelican"
        Plugins = "Framework :: Pelican :: Plugins"
        Themes = "Framework :: Pelican :: Themes"
    class Plone(object):
        Plone = "Framework :: Plone"
        Two = "Framework :: Plone :: 5.2"
        Three = "Framework :: Plone :: 5.3"
        Zero = "Framework :: Plone :: 6.0"
        One = "Framework :: Plone :: 5.1"
        Addon = "Framework :: Plone :: Addon"
        Core = "Framework :: Plone :: Core"
        Theme = "Framework :: Plone :: Theme"
    Pylons = "Framework :: Pylons"
    Pyramid = "Framework :: Pyramid"
    Pytest = "Framework :: Pytest"
    Review_Board = "Framework :: Review Board"
    class Robot_Framework(object):
        Robot_Framework = "Framework :: Robot Framework"
        Library = "Framework :: Robot Framework :: Library"
        Tool = "Framework :: Robot Framework :: Tool"
    Scrapy = "Framework :: Scrapy"
    Setuptools_Plugin = "Framework :: Setuptools Plugin"
    class Sphinx(object):
        Sphinx = "Framework :: Sphinx"
        Extension = "Framework :: Sphinx :: Extension"
        Theme = "Framework :: Sphinx :: Theme"
    Trac = "Framework :: Trac"
    Trio = "Framework :: Trio"
    Tryton = "Framework :: Tryton"
    class TurboGears(object):
        TurboGears = "Framework :: TurboGears"
        Applications = "Framework :: TurboGears :: Applications"
        Widgets = "Framework :: TurboGears :: Widgets"
    Twisted = "Framework :: Twisted"
    class Wagtail(object):
        Wagtail = "Framework :: Wagtail"
        One = "Framework :: Wagtail :: 1"
        Two = "Framework :: Wagtail :: 2"
    ZODB = "Framework :: ZODB"
    class Zope(object):
        Zope = "Framework :: Zope"
        Two = "Framework :: Zope :: 2"
        Three = "Framework :: Zope :: 3"
        Four = "Framework :: Zope :: 4"
        Five = "Framework :: Zope :: 5"
    Zope2 = "Framework :: Zope2"
    Zope3 = "Framework :: Zope3"
    napari = "Framework :: napari"
    tox = "Framework :: tox"
class Intended_Audience(object):
    """PyPI trove classifiers for the "Intended Audience" category."""
    Customer_Service = "Intended Audience :: Customer Service"
    Developers = "Intended Audience :: Developers"
    Education = "Intended Audience :: Education"
    End_Users_Desktop = "Intended Audience :: End Users/Desktop"
    Financial_and_Insurance_Industry = "Intended Audience :: Financial and Insurance Industry"
    Healthcare_Industry = "Intended Audience :: Healthcare Industry"
    Information_Technology = "Intended Audience :: Information Technology"
    Legal_Industry = "Intended Audience :: Legal Industry"
    Manufacturing = "Intended Audience :: Manufacturing"
    Other_Audience = "Intended Audience :: Other Audience"
    Religion = "Intended Audience :: Religion"
    Science_Research = "Intended Audience :: Science/Research"
    System_Administrators = "Intended Audience :: System Administrators"
    Telecommunications_Industry = "Intended Audience :: Telecommunications Industry"
class License(object):
    """PyPI trove classifiers for the "License" category.

    OSI-approved licenses live in the nested ``OSI_Approved`` class,
    mirroring the ``License :: OSI Approved :: ...`` hierarchy.
    """
    Aladdin_Free_Public_License_AFPL = "License :: Aladdin Free Public License (AFPL)"
    CC0_1_0_Universal_CC0_1_0_Public_Domain_Dedication = "License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication"
    CeCILLB_Free_Software_License_Agreement_CECILLB = "License :: CeCILL-B Free Software License Agreement (CECILL-B)"
    CeCILLC_Free_Software_License_Agreement_CECILLC = "License :: CeCILL-C Free Software License Agreement (CECILL-C)"
    DFSG_approved = "License :: DFSG approved"
    Eiffel_Forum_License_EFL = "License :: Eiffel Forum License (EFL)"
    Free_For_Educational_Use = "License :: Free For Educational Use"
    Free_For_Home_Use = "License :: Free For Home Use"
    Free_To_Use_But_Restricted = "License :: Free To Use But Restricted"
    Free_for_noncommercial_use = "License :: Free for non-commercial use"
    Freely_Distributable = "License :: Freely Distributable"
    Freeware = "License :: Freeware"
    GUST_Font_License_1_0 = "License :: GUST Font License 1.0"
    GUST_Font_License_20060930 = "License :: GUST Font License 2006-09-30"
    Netscape_Public_License_NPL = "License :: Netscape Public License (NPL)"
    Nokia_Open_Source_License_NOKOS = "License :: Nokia Open Source License (NOKOS)"
    class OSI_Approved(object):
        OSI_Approved = "License :: OSI Approved"
        Academic_Free_License_AFL = "License :: OSI Approved :: Academic Free License (AFL)"
        Apache_Software_License = "License :: OSI Approved :: Apache Software License"
        Apple_Public_Source_License = "License :: OSI Approved :: Apple Public Source License"
        Artistic_License = "License :: OSI Approved :: Artistic License"
        Attribution_Assurance_License = "License :: OSI Approved :: Attribution Assurance License"
        BSD_License = "License :: OSI Approved :: BSD License"
        Boost_Software_License_1_0_BSL1_0 = "License :: OSI Approved :: Boost Software License 1.0 (BSL-1.0)"
        CEA_CNRS_Inria_Logiciel_Libre_License_version_2_1_CeCILL2_1 = "License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)"
        Common_Development_and_Distribution_License_1_0_CDDL1_0 = "License :: OSI Approved :: Common Development and Distribution License 1.0 (CDDL-1.0)"
        Common_Public_License = "License :: OSI Approved :: Common Public License"
        Eclipse_Public_License_1_0_EPL1_0 = "License :: OSI Approved :: Eclipse Public License 1.0 (EPL-1.0)"
        Eclipse_Public_License_2_0_EPL2_0 = "License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)"
        Eiffel_Forum_License = "License :: OSI Approved :: Eiffel Forum License"
        European_Union_Public_Licence_1_0_EUPL_1_0 = "License :: OSI Approved :: European Union Public Licence 1.0 (EUPL 1.0)"
        European_Union_Public_Licence_1_1_EUPL_1_1 = "License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)"
        European_Union_Public_Licence_1_2_EUPL_1_2 = "License :: OSI Approved :: European Union Public Licence 1.2 (EUPL 1.2)"
        GNU_Affero_General_Public_License_v3 = "License :: OSI Approved :: GNU Affero General Public License v3"
        GNU_Affero_General_Public_License_v3_or_later_AGPLv3_plus = "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)"
        GNU_Free_Documentation_License_FDL = "License :: OSI Approved :: GNU Free Documentation License (FDL)"
        GNU_General_Public_License_GPL = "License :: OSI Approved :: GNU General Public License (GPL)"
        GNU_General_Public_License_v2_GPLv2 = "License :: OSI Approved :: GNU General Public License v2 (GPLv2)"
        GNU_General_Public_License_v2_or_later_GPLv2_plus = "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)"
        GNU_General_Public_License_v3_GPLv3 = "License :: OSI Approved :: GNU General Public License v3 (GPLv3)"
        GNU_General_Public_License_v3_or_later_GPLv3_plus = "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)"
        GNU_Lesser_General_Public_License_v2_LGPLv2 = "License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)"
        GNU_Lesser_General_Public_License_v2_or_later_LGPLv2_plus = "License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)"
        GNU_Lesser_General_Public_License_v3_LGPLv3 = "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)"
        GNU_Lesser_General_Public_License_v3_or_later_LGPLv3_plus = "License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)"
        GNU_Library_or_Lesser_General_Public_License_LGPL = "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)"
        Historical_Permission_Notice_and_Disclaimer_HPND = "License :: OSI Approved :: Historical Permission Notice and Disclaimer (HPND)"
        IBM_Public_License = "License :: OSI Approved :: IBM Public License"
        ISC_License_ISCL = "License :: OSI Approved :: ISC License (ISCL)"
        Intel_Open_Source_License = "License :: OSI Approved :: Intel Open Source License"
        Jabber_Open_Source_License = "License :: OSI Approved :: Jabber Open Source License"
        MIT_License = "License :: OSI Approved :: MIT License"
        MITRE_Collaborative_Virtual_Workspace_License_CVW = "License :: OSI Approved :: MITRE Collaborative Virtual Workspace License (CVW)"
        MirOS_License_MirOS = "License :: OSI Approved :: MirOS License (MirOS)"
        Motosoto_License = "License :: OSI Approved :: Motosoto License"
        Mozilla_Public_License_1_0_MPL = "License :: OSI Approved :: Mozilla Public License 1.0 (MPL)"
        Mozilla_Public_License_1_1_MPL_1_1 = "License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)"
        Mozilla_Public_License_2_0_MPL_2_0 = "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)"
        Nethack_General_Public_License = "License :: OSI Approved :: Nethack General Public License"
        Nokia_Open_Source_License = "License :: OSI Approved :: Nokia Open Source License"
        Open_Group_Test_Suite_License = "License :: OSI Approved :: Open Group Test Suite License"
        Open_Software_License_3_0_OSL3_0 = "License :: OSI Approved :: Open Software License 3.0 (OSL-3.0)"
        PostgreSQL_License = "License :: OSI Approved :: PostgreSQL License"
        Python_License_CNRI_Python_License = "License :: OSI Approved :: Python License (CNRI Python License)"
        Python_Software_Foundation_License = "License :: OSI Approved :: Python Software Foundation License"
        Qt_Public_License_QPL = "License :: OSI Approved :: Qt Public License (QPL)"
        Ricoh_Source_Code_Public_License = "License :: OSI Approved :: Ricoh Source Code Public License"
        SIL_Open_Font_License_1_1_OFL1_1 = "License :: OSI Approved :: SIL Open Font License 1.1 (OFL-1.1)"
        Sleepycat_License = "License :: OSI Approved :: Sleepycat License"
        Sun_Industry_Standards_Source_License_SISSL = "License :: OSI Approved :: Sun Industry Standards Source License (SISSL)"
        Sun_Public_License = "License :: OSI Approved :: Sun Public License"
        The_Unlicense_Unlicense = "License :: OSI Approved :: The Unlicense (Unlicense)"
        Universal_Permissive_License_UPL = "License :: OSI Approved :: Universal Permissive License (UPL)"
        University_of_Illinois_NCSA_Open_Source_License = "License :: OSI Approved :: University of Illinois/NCSA Open Source License"
        Vovida_Software_License_1_0 = "License :: OSI Approved :: Vovida Software License 1.0"
        W3C_License = "License :: OSI Approved :: W3C License"
        X_Net_License = "License :: OSI Approved :: X.Net License"
        Zope_Public_License = "License :: OSI Approved :: Zope Public License"
        zlib_libpng_License = "License :: OSI Approved :: zlib/libpng License"
    Other_Proprietary_License = "License :: Other/Proprietary License"
    Public_Domain = "License :: Public Domain"
    Repoze_Public_License = "License :: Repoze Public License"
class Natural_Language(object):
    """PyPI trove classifiers for the "Natural Language" category."""
    Afrikaans = "Natural Language :: Afrikaans"
    Arabic = "Natural Language :: Arabic"
    Basque = "Natural Language :: Basque"
    Bengali = "Natural Language :: Bengali"
    Bosnian = "Natural Language :: Bosnian"
    Bulgarian = "Natural Language :: Bulgarian"
    Cantonese = "Natural Language :: Cantonese"
    Catalan = "Natural Language :: Catalan"
    Chinese_Simplified = "Natural Language :: Chinese (Simplified)"
    Chinese_Traditional = "Natural Language :: Chinese (Traditional)"
    Croatian = "Natural Language :: Croatian"
    Czech = "Natural Language :: Czech"
    Danish = "Natural Language :: Danish"
    Dutch = "Natural Language :: Dutch"
    English = "Natural Language :: English"
    Esperanto = "Natural Language :: Esperanto"
    Finnish = "Natural Language :: Finnish"
    French = "Natural Language :: French"
    Galician = "Natural Language :: Galician"
    German = "Natural Language :: German"
    Greek = "Natural Language :: Greek"
    Hebrew = "Natural Language :: Hebrew"
    Hindi = "Natural Language :: Hindi"
    Hungarian = "Natural Language :: Hungarian"
    Icelandic = "Natural Language :: Icelandic"
    Indonesian = "Natural Language :: Indonesian"
    Irish = "Natural Language :: Irish"
    Italian = "Natural Language :: Italian"
    Japanese = "Natural Language :: Japanese"
    Javanese = "Natural Language :: Javanese"
    Korean = "Natural Language :: Korean"
    Latin = "Natural Language :: Latin"
    Latvian = "Natural Language :: Latvian"
    Lithuanian = "Natural Language :: Lithuanian"
    Macedonian = "Natural Language :: Macedonian"
    Malay = "Natural Language :: Malay"
    Marathi = "Natural Language :: Marathi"
    Nepali = "Natural Language :: Nepali"
    Norwegian = "Natural Language :: Norwegian"
    Panjabi = "Natural Language :: Panjabi"
    Persian = "Natural Language :: Persian"
    Polish = "Natural Language :: Polish"
    Portuguese = "Natural Language :: Portuguese"
    Portuguese_Brazilian = "Natural Language :: Portuguese (Brazilian)"
    Romanian = "Natural Language :: Romanian"
    Russian = "Natural Language :: Russian"
    Serbian = "Natural Language :: Serbian"
    Slovak = "Natural Language :: Slovak"
    Slovenian = "Natural Language :: Slovenian"
    Spanish = "Natural Language :: Spanish"
    Swedish = "Natural Language :: Swedish"
    Tamil = "Natural Language :: Tamil"
    Telugu = "Natural Language :: Telugu"
    Thai = "Natural Language :: Thai"
    Tibetan = "Natural Language :: Tibetan"
    Turkish = "Natural Language :: Turkish"
    Ukrainian = "Natural Language :: Ukrainian"
    Urdu = "Natural Language :: Urdu"
    Vietnamese = "Natural Language :: Vietnamese"
class Operating_System(object):
    """PyPI trove classifiers for the "Operating System" category.

    Nested classes mirror the ``::``-separated classifier hierarchy.
    """
    Android = "Operating System :: Android"
    BeOS = "Operating System :: BeOS"
    class MacOS(object):
        MacOS = "Operating System :: MacOS"
        MacOS_9 = "Operating System :: MacOS :: MacOS 9"
        MacOS_X = "Operating System :: MacOS :: MacOS X"
    class Microsoft(object):
        Microsoft = "Operating System :: Microsoft"
        MSDOS = "Operating System :: Microsoft :: MS-DOS"
        class Windows(object):
            Windows = "Operating System :: Microsoft :: Windows"
            Windows_10 = "Operating System :: Microsoft :: Windows :: Windows 10"
            Windows_3_1_or_Earlier = "Operating System :: Microsoft :: Windows :: Windows 3.1 or Earlier"
            Windows_7 = "Operating System :: Microsoft :: Windows :: Windows 7"
            Windows_8 = "Operating System :: Microsoft :: Windows :: Windows 8"
            Windows_8_1 = "Operating System :: Microsoft :: Windows :: Windows 8.1"
            Windows_95_98_2000 = "Operating System :: Microsoft :: Windows :: Windows 95/98/2000"
            Windows_CE = "Operating System :: Microsoft :: Windows :: Windows CE"
            Windows_NT_2000 = "Operating System :: Microsoft :: Windows :: Windows NT/2000"
            Windows_Server_2003 = "Operating System :: Microsoft :: Windows :: Windows Server 2003"
            Windows_Server_2008 = "Operating System :: Microsoft :: Windows :: Windows Server 2008"
            Windows_Vista = "Operating System :: Microsoft :: Windows :: Windows Vista"
            Windows_XP = "Operating System :: Microsoft :: Windows :: Windows XP"
    OS_Independent = "Operating System :: OS Independent"
    OS_2 = "Operating System :: OS/2"
    Other_OS = "Operating System :: Other OS"
    PDA_Systems = "Operating System :: PDA Systems"
    class POSIX(object):
        POSIX = "Operating System :: POSIX"
        AIX = "Operating System :: POSIX :: AIX"
        class BSD(object):
            BSD = "Operating System :: POSIX :: BSD"
            BSD_OS = "Operating System :: POSIX :: BSD :: BSD/OS"
            FreeBSD = "Operating System :: POSIX :: BSD :: FreeBSD"
            NetBSD = "Operating System :: POSIX :: BSD :: NetBSD"
            OpenBSD = "Operating System :: POSIX :: BSD :: OpenBSD"
        GNU_Hurd = "Operating System :: POSIX :: GNU Hurd"
        HPUX = "Operating System :: POSIX :: HP-UX"
        IRIX = "Operating System :: POSIX :: IRIX"
        Linux = "Operating System :: POSIX :: Linux"
        Other = "Operating System :: POSIX :: Other"
        SCO = "Operating System :: POSIX :: SCO"
        SunOS_Solaris = "Operating System :: POSIX :: SunOS/Solaris"
    PalmOS = "Operating System :: PalmOS"
    RISC_OS = "Operating System :: RISC OS"
    Unix = "Operating System :: Unix"
    iOS = "Operating System :: iOS"
class Programming_Language(object):
    """PyPI trove classifiers for the "Programming Language" category.

    NOTE(review): in the nested ``Python`` class, version constants are
    grouped by the *trailing digit* of the version string — so
    ``Python.Two.Two`` is "... :: 3.2" and ``Python.Three.Three`` is
    "... :: 3.3" — and the bare "Programming Language :: Python :: 2" /
    ":: 3" classifiers are not present.  This is a code-generator quirk;
    renaming the attributes would break existing callers, so it is only
    flagged here.
    """
    APL = "Programming Language :: APL"
    ASP = "Programming Language :: ASP"
    Ada = "Programming Language :: Ada"
    Assembly = "Programming Language :: Assembly"
    Awk = "Programming Language :: Awk"
    Basic = "Programming Language :: Basic"
    C = "Programming Language :: C"
    C_sharp = "Programming Language :: C#"
    C_plus_plus = "Programming Language :: C++"
    Cold_Fusion = "Programming Language :: Cold Fusion"
    Cython = "Programming Language :: Cython"
    Delphi_Kylix = "Programming Language :: Delphi/Kylix"
    Dylan = "Programming Language :: Dylan"
    Eiffel = "Programming Language :: Eiffel"
    EmacsLisp = "Programming Language :: Emacs-Lisp"
    Erlang = "Programming Language :: Erlang"
    Euler = "Programming Language :: Euler"
    Euphoria = "Programming Language :: Euphoria"
    F_sharp = "Programming Language :: F#"
    Forth = "Programming Language :: Forth"
    Fortran = "Programming Language :: Fortran"
    Haskell = "Programming Language :: Haskell"
    Java = "Programming Language :: Java"
    JavaScript = "Programming Language :: JavaScript"
    Kotlin = "Programming Language :: Kotlin"
    Lisp = "Programming Language :: Lisp"
    Logo = "Programming Language :: Logo"
    ML = "Programming Language :: ML"
    Modula = "Programming Language :: Modula"
    OCaml = "Programming Language :: OCaml"
    Object_Pascal = "Programming Language :: Object Pascal"
    Objective_C = "Programming Language :: Objective C"
    Other = "Programming Language :: Other"
    Other_Scripting_Engines = "Programming Language :: Other Scripting Engines"
    PHP = "Programming Language :: PHP"
    PL_SQL = "Programming Language :: PL/SQL"
    PROGRESS = "Programming Language :: PROGRESS"
    Pascal = "Programming Language :: Pascal"
    Perl = "Programming Language :: Perl"
    Pike = "Programming Language :: Pike"
    Pliant = "Programming Language :: Pliant"
    Prolog = "Programming Language :: Prolog"
    class Python(object):
        Python = "Programming Language :: Python"
        class Two(object):
            Two = "Programming Language :: Python :: 3.2"
            Only = "Programming Language :: Python :: 2 :: Only"
        class Three(object):
            Three = "Programming Language :: Python :: 3.3"
            Only = "Programming Language :: Python :: 3 :: Only"
            Four = "Programming Language :: Python :: 3.4"
            Five = "Programming Language :: Python :: 3.5"
            Six = "Programming Language :: Python :: 3.6"
            Seven = "Programming Language :: Python :: 3.7"
            Zero = "Programming Language :: Python :: 3.0"
            One = "Programming Language :: Python :: 3.1"
            Ten = "Programming Language :: Python :: 3.10"
            Eight = "Programming Language :: Python :: 3.8"
            Nine = "Programming Language :: Python :: 3.9"
        class Implementation(object):
            Implementation = "Programming Language :: Python :: Implementation"
            CPython = "Programming Language :: Python :: Implementation :: CPython"
            IronPython = "Programming Language :: Python :: Implementation :: IronPython"
            Jython = "Programming Language :: Python :: Implementation :: Jython"
            MicroPython = "Programming Language :: Python :: Implementation :: MicroPython"
            PyPy = "Programming Language :: Python :: Implementation :: PyPy"
            Stackless = "Programming Language :: Python :: Implementation :: Stackless"
    R = "Programming Language :: R"
    REBOL = "Programming Language :: REBOL"
    Rexx = "Programming Language :: Rexx"
    Ruby = "Programming Language :: Ruby"
    Rust = "Programming Language :: Rust"
    SQL = "Programming Language :: SQL"
    Scheme = "Programming Language :: Scheme"
    Simula = "Programming Language :: Simula"
    Smalltalk = "Programming Language :: Smalltalk"
    Tcl = "Programming Language :: Tcl"
    Unix_Shell = "Programming Language :: Unix Shell"
    Visual_Basic = "Programming Language :: Visual Basic"
    XBasic = "Programming Language :: XBasic"
    YACC = "Programming Language :: YACC"
    Zope = "Programming Language :: Zope"
class Topic(object):
Adaptive_Technologies = "Topic :: Adaptive Technologies"
Artistic_Software = "Topic :: Artistic Software"
class Communications(object):
    """Trove classifiers under "Topic :: Communications".

    Fix: the file-sharing classifiers previously read
    "Topic :: Communications :: FileIO Sharing", which is not a valid
    PyPI classifier (apparently a bad global replace of "File" ->
    "FileIO").  The official list spells it "File Sharing".
    """
    Communications = "Topic :: Communications"
    BBS = "Topic :: Communications :: BBS"
    class Chat(object):
        Chat = "Topic :: Communications :: Chat"
        ICQ = "Topic :: Communications :: Chat :: ICQ"
        Internet_Relay_Chat = "Topic :: Communications :: Chat :: Internet Relay Chat"
        Unix_Talk = "Topic :: Communications :: Chat :: Unix Talk"
    Conferencing = "Topic :: Communications :: Conferencing"
    class Email(object):
        Email = "Topic :: Communications :: Email"
        Address_Book = "Topic :: Communications :: Email :: Address Book"
        Email_Clients_MUA = "Topic :: Communications :: Email :: Email Clients (MUA)"
        Filters = "Topic :: Communications :: Email :: Filters"
        Mail_Transport_Agents = "Topic :: Communications :: Email :: Mail Transport Agents"
        Mailing_List_Servers = "Topic :: Communications :: Email :: Mailing List Servers"
        class PostOffice(object):
            PostOffice = "Topic :: Communications :: Email :: Post-Office"
            IMAP = "Topic :: Communications :: Email :: Post-Office :: IMAP"
            POP3 = "Topic :: Communications :: Email :: Post-Office :: POP3"
    FIDO = "Topic :: Communications :: FIDO"
    Fax = "Topic :: Communications :: Fax"
    class File_Sharing(object):
        # Corrected from the invalid "FileIO Sharing" strings.
        File_Sharing = "Topic :: Communications :: File Sharing"
        Gnutella = "Topic :: Communications :: File Sharing :: Gnutella"
        Napster = "Topic :: Communications :: File Sharing :: Napster"
    Ham_Radio = "Topic :: Communications :: Ham Radio"
    Internet_Phone = "Topic :: Communications :: Internet Phone"
    Telephony = "Topic :: Communications :: Telephony"
    Usenet_News = "Topic :: Communications :: Usenet News"
class Database(object):
    """Trove classifiers under "Topic :: Database"."""
    Database = "Topic :: Database"
    Database_Engines_Servers = "Topic :: Database :: Database Engines/Servers"
    FrontEnds = "Topic :: Database :: Front-Ends"
class Desktop_Environment(object):
    """Trove classifiers under "Topic :: Desktop Environment".

    Fix: ``File_Managers`` previously read
    "Topic :: Desktop Environment :: FileIO Managers", which is not a
    valid PyPI classifier (bad "File" -> "FileIO" replace); the
    official string is "File Managers".
    """
    Desktop_Environment = "Topic :: Desktop Environment"
    # Corrected from the invalid "FileIO Managers" string.
    File_Managers = "Topic :: Desktop Environment :: File Managers"
    GNUstep = "Topic :: Desktop Environment :: GNUstep"
    Gnome = "Topic :: Desktop Environment :: Gnome"
    class K_Desktop_Environment_KDE(object):
        K_Desktop_Environment_KDE = "Topic :: Desktop Environment :: K Desktop Environment (KDE)"
        Themes = "Topic :: Desktop Environment :: K Desktop Environment (KDE) :: Themes"
    class PicoGUI(object):
        PicoGUI = "Topic :: Desktop Environment :: PicoGUI"
        Applications = "Topic :: Desktop Environment :: PicoGUI :: Applications"
        Themes = "Topic :: Desktop Environment :: PicoGUI :: Themes"
    Screen_Savers = "Topic :: Desktop Environment :: Screen Savers"
    class Window_Managers(object):
        Window_Managers = "Topic :: Desktop Environment :: Window Managers"
        class Afterstep(object):
            Afterstep = "Topic :: Desktop Environment :: Window Managers :: Afterstep"
            Themes = "Topic :: Desktop Environment :: Window Managers :: Afterstep :: Themes"
        Applets = "Topic :: Desktop Environment :: Window Managers :: Applets"
        class Blackbox(object):
            Blackbox = "Topic :: Desktop Environment :: Window Managers :: Blackbox"
            Themes = "Topic :: Desktop Environment :: Window Managers :: Blackbox :: Themes"
        class CTWM(object):
            CTWM = "Topic :: Desktop Environment :: Window Managers :: CTWM"
            Themes = "Topic :: Desktop Environment :: Window Managers :: CTWM :: Themes"
        class Enlightenment(object):
            Enlightenment = "Topic :: Desktop Environment :: Window Managers :: Enlightenment"
            Epplets = "Topic :: Desktop Environment :: Window Managers :: Enlightenment :: Epplets"
            Themes_DR15 = "Topic :: Desktop Environment :: Window Managers :: Enlightenment :: Themes DR15"
            Themes_DR16 = "Topic :: Desktop Environment :: Window Managers :: Enlightenment :: Themes DR16"
            Themes_DR17 = "Topic :: Desktop Environment :: Window Managers :: Enlightenment :: Themes DR17"
        class FVWM(object):
            FVWM = "Topic :: Desktop Environment :: Window Managers :: FVWM"
            Themes = "Topic :: Desktop Environment :: Window Managers :: FVWM :: Themes"
        class Fluxbox(object):
            Fluxbox = "Topic :: Desktop Environment :: Window Managers :: Fluxbox"
            Themes = "Topic :: Desktop Environment :: Window Managers :: Fluxbox :: Themes"
        class IceWM(object):
            IceWM = "Topic :: Desktop Environment :: Window Managers :: IceWM"
            Themes = "Topic :: Desktop Environment :: Window Managers :: IceWM :: Themes"
        class MetaCity(object):
            MetaCity = "Topic :: Desktop Environment :: Window Managers :: MetaCity"
            Themes = "Topic :: Desktop Environment :: Window Managers :: MetaCity :: Themes"
        class Oroborus(object):
            Oroborus = "Topic :: Desktop Environment :: Window Managers :: Oroborus"
            Themes = "Topic :: Desktop Environment :: Window Managers :: Oroborus :: Themes"
        class Sawfish(object):
            Sawfish = "Topic :: Desktop Environment :: Window Managers :: Sawfish"
            Themes_0_30 = "Topic :: Desktop Environment :: Window Managers :: Sawfish :: Themes 0.30"
            Themes_pre0_30 = "Topic :: Desktop Environment :: Window Managers :: Sawfish :: Themes pre-0.30"
        class Waimea(object):
            Waimea = "Topic :: Desktop Environment :: Window Managers :: Waimea"
            Themes = "Topic :: Desktop Environment :: Window Managers :: Waimea :: Themes"
        class Window_Maker(object):
            Window_Maker = "Topic :: Desktop Environment :: Window Managers :: Window Maker"
            Applets = "Topic :: Desktop Environment :: Window Managers :: Window Maker :: Applets"
            Themes = "Topic :: Desktop Environment :: Window Managers :: Window Maker :: Themes"
        class XFCE(object):
            XFCE = "Topic :: Desktop Environment :: Window Managers :: XFCE"
            Themes = "Topic :: Desktop Environment :: Window Managers :: XFCE :: Themes"
class Documentation(object):
    """Trove classifiers under "Topic :: Documentation"."""
    Documentation = "Topic :: Documentation"
    Sphinx = "Topic :: Documentation :: Sphinx"
class Education(object):
    """Trove classifiers under "Topic :: Education"."""
    Education = "Topic :: Education"
    Computer_Aided_Instruction_CAI = "Topic :: Education :: Computer Aided Instruction (CAI)"
    Testing = "Topic :: Education :: Testing"
class Games_Entertainment(object):
    """Trove classifiers under "Topic :: Games/Entertainment"."""
    Games_Entertainment = "Topic :: Games/Entertainment"
    Arcade = "Topic :: Games/Entertainment :: Arcade"
    Board_Games = "Topic :: Games/Entertainment :: Board Games"
    First_Person_Shooters = "Topic :: Games/Entertainment :: First Person Shooters"
    Fortune_Cookies = "Topic :: Games/Entertainment :: Fortune Cookies"
    MultiUser_Dungeons_MUD = "Topic :: Games/Entertainment :: Multi-User Dungeons (MUD)"
    Puzzle_Games = "Topic :: Games/Entertainment :: Puzzle Games"
    Real_Time_Strategy = "Topic :: Games/Entertainment :: Real Time Strategy"
    RolePlaying = "Topic :: Games/Entertainment :: Role-Playing"
    SideScrolling_Arcade_Games = "Topic :: Games/Entertainment :: Side-Scrolling/Arcade Games"
    Simulation = "Topic :: Games/Entertainment :: Simulation"
    Turn_Based_Strategy = "Topic :: Games/Entertainment :: Turn Based Strategy"
Home_Automation = "Topic :: Home Automation"
class Internet(object):
    """Trove classifiers under "Topic :: Internet".

    Fix: ``File_Transfer_Protocol_FTP`` previously read
    "Topic :: Internet :: FileIO Transfer Protocol (FTP)", which is not
    a valid PyPI classifier (bad "File" -> "FileIO" replace); the
    official string is "File Transfer Protocol (FTP)".
    """
    Internet = "Topic :: Internet"
    # Corrected from the invalid "FileIO Transfer Protocol (FTP)" string.
    File_Transfer_Protocol_FTP = "Topic :: Internet :: File Transfer Protocol (FTP)"
    Finger = "Topic :: Internet :: Finger"
    Log_Analysis = "Topic :: Internet :: Log Analysis"
    Name_Service_DNS = "Topic :: Internet :: Name Service (DNS)"
    Proxy_Servers = "Topic :: Internet :: Proxy Servers"
    WAP = "Topic :: Internet :: WAP"
    class WWW_HTTP(object):
        WWW_HTTP = "Topic :: Internet :: WWW/HTTP"
        Browsers = "Topic :: Internet :: WWW/HTTP :: Browsers"
        class Dynamic_Content(object):
            Dynamic_Content = "Topic :: Internet :: WWW/HTTP :: Dynamic Content"
            CGI_Tools_Libraries = "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries"
            Content_Management_System = "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Content Management System"
            Message_Boards = "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards"
            News_Diary = "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: News/Diary"
            Page_Counters = "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Page Counters"
            Wiki = "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Wiki"
        HTTP_Servers = "Topic :: Internet :: WWW/HTTP :: HTTP Servers"
        Indexing_Search = "Topic :: Internet :: WWW/HTTP :: Indexing/Search"
        Session = "Topic :: Internet :: WWW/HTTP :: Session"
        class Site_Management(object):
            Site_Management = "Topic :: Internet :: WWW/HTTP :: Site Management"
            Link_Checking = "Topic :: Internet :: WWW/HTTP :: Site Management :: Link Checking"
        class WSGI(object):
            WSGI = "Topic :: Internet :: WWW/HTTP :: WSGI"
            Application = "Topic :: Internet :: WWW/HTTP :: WSGI :: Application"
            Middleware = "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware"
            Server = "Topic :: Internet :: WWW/HTTP :: WSGI :: Server"
    XMPP = "Topic :: Internet :: XMPP"
    Z39_50 = "Topic :: Internet :: Z39.50"
class Multimedia(object):
    """Trove classifiers under "Topic :: Multimedia"."""
    Multimedia = "Topic :: Multimedia"
    class Graphics(object):
        Graphics = "Topic :: Multimedia :: Graphics"
        ThreeDimension_Modeling = "Topic :: Multimedia :: Graphics :: 3D Modeling"
        ThreeDimension_Rendering = "Topic :: Multimedia :: Graphics :: 3D Rendering"
        class Capture(object):
            Capture = "Topic :: Multimedia :: Graphics :: Capture"
            Digital_Camera = "Topic :: Multimedia :: Graphics :: Capture :: Digital Camera"
            Scanners = "Topic :: Multimedia :: Graphics :: Capture :: Scanners"
            Screen_Capture = "Topic :: Multimedia :: Graphics :: Capture :: Screen Capture"
        class Editors(object):
            Editors = "Topic :: Multimedia :: Graphics :: Editors"
            RasterBased = "Topic :: Multimedia :: Graphics :: Editors :: Raster-Based"
            VectorBased = "Topic :: Multimedia :: Graphics :: Editors :: Vector-Based"
        Graphics_Conversion = "Topic :: Multimedia :: Graphics :: Graphics Conversion"
        Presentation = "Topic :: Multimedia :: Graphics :: Presentation"
        Viewers = "Topic :: Multimedia :: Graphics :: Viewers"
    class Sound_Audio(object):
        Sound_Audio = "Topic :: Multimedia :: Sound/Audio"
        Analysis = "Topic :: Multimedia :: Sound/Audio :: Analysis"
        class CD_Audio(object):
            CD_Audio = "Topic :: Multimedia :: Sound/Audio :: CD Audio"
            CD_Playing = "Topic :: Multimedia :: Sound/Audio :: CD Audio :: CD Playing"
            CD_Ripping = "Topic :: Multimedia :: Sound/Audio :: CD Audio :: CD Ripping"
            CD_Writing = "Topic :: Multimedia :: Sound/Audio :: CD Audio :: CD Writing"
        Capture_Recording = "Topic :: Multimedia :: Sound/Audio :: Capture/Recording"
        Conversion = "Topic :: Multimedia :: Sound/Audio :: Conversion"
        Editors = "Topic :: Multimedia :: Sound/Audio :: Editors"
        MIDI = "Topic :: Multimedia :: Sound/Audio :: MIDI"
        Mixers = "Topic :: Multimedia :: Sound/Audio :: Mixers"
        class Players(object):
            Players = "Topic :: Multimedia :: Sound/Audio :: Players"
            MP3 = "Topic :: Multimedia :: Sound/Audio :: Players :: MP3"
        Sound_Synthesis = "Topic :: Multimedia :: Sound/Audio :: Sound Synthesis"
        Speech = "Topic :: Multimedia :: Sound/Audio :: Speech"
    class Video(object):
        Video = "Topic :: Multimedia :: Video"
        Capture = "Topic :: Multimedia :: Video :: Capture"
        Conversion = "Topic :: Multimedia :: Video :: Conversion"
        Display = "Topic :: Multimedia :: Video :: Display"
        NonLinear_Editor = "Topic :: Multimedia :: Video :: Non-Linear Editor"
class Office_Business(object):
    """Trove classifiers under "Topic :: Office/Business"."""
    Office_Business = "Topic :: Office/Business"
    class Financial(object):
        Financial = "Topic :: Office/Business :: Financial"
        Accounting = "Topic :: Office/Business :: Financial :: Accounting"
        Investment = "Topic :: Office/Business :: Financial :: Investment"
        PointOfSale = "Topic :: Office/Business :: Financial :: Point-Of-Sale"
        Spreadsheet = "Topic :: Office/Business :: Financial :: Spreadsheet"
    Groupware = "Topic :: Office/Business :: Groupware"
    News_Diary = "Topic :: Office/Business :: News/Diary"
    Office_Suites = "Topic :: Office/Business :: Office Suites"
    Scheduling = "Topic :: Office/Business :: Scheduling"
Other_Nonlisted_Topic = "Topic :: Other/Nonlisted Topic"
Printing = "Topic :: Printing"
Religion = "Topic :: Religion"
class Scientific_Engineering(object):
Scientific_Engineering = "Topic :: Scientific/Engineering"
Artificial_Intelligence = "Topic :: Scientific/Engineering :: Artificial Intelligence"
Artificial_Life = "Topic :: Scientific/Engineering :: Artificial Life"
Astronomy = "Topic :: Scientific/Engineering :: Astronomy"
Atmospheric_Science = "Topic :: Scientific/Engineering :: Atmospheric Science"
BioInformatics = "Topic :: Scientific/Engineering :: Bio-Informatics"
Chemistry = "Topic :: Scientific/Engineering :: Chemistry"
Electronic_Design_Automation_EDA = "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)"
GIS = "Topic :: Scientific/Engineering :: GIS"
Human_Machine_Interfaces = "Topic :: Scientific/Engineering :: Human Machine Interfaces"
Hydrology = "Topic :: Scientific/Engineering :: Hydrology"
Image_Processing = "Topic :: Scientific/Engineering :: Image Processing"
Image_Recognition = "Topic :: Scientific/Engineering :: Image Recognition"
Information_Analysis = "Topic :: Scientific/Engineering :: Information Analysis"
Interface_Engine_Protocol_Translator = "Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator"
Mathematics = "Topic :: Scientific/Engineering :: Mathematics"
Medical_Science_Apps_ = "Topic :: Scientific/Engineering :: Medical Science Apps."
Physics = "Topic :: Scientific/Engineering :: Physics"
Visualization = "Topic :: Scientific/Engineering :: Visualization"
class Security(object):
Security = "Topic :: Security"
Cryptography = "Topic :: Security :: Cryptography"
class Sociology(object):
Sociology = "Topic :: Sociology"
Genealogy = "Topic :: Sociology :: Genealogy"
History = "Topic :: Sociology :: History"
    class Software_Development(object):
        # Trove classifier constants for the "Topic :: Software Development"
        # subtree; nested classes mirror the " :: " hierarchy.
        Software_Development = "Topic :: Software Development"
        Assemblers = "Topic :: Software Development :: Assemblers"
        Bug_Tracking = "Topic :: Software Development :: Bug Tracking"
        Build_Tools = "Topic :: Software Development :: Build Tools"
        Code_Generators = "Topic :: Software Development :: Code Generators"
        Compilers = "Topic :: Software Development :: Compilers"
        Debuggers = "Topic :: Software Development :: Debuggers"
        Disassemblers = "Topic :: Software Development :: Disassemblers"
        Documentation = "Topic :: Software Development :: Documentation"
        Embedded_Systems = "Topic :: Software Development :: Embedded Systems"
        Internationalization = "Topic :: Software Development :: Internationalization"
        Interpreters = "Topic :: Software Development :: Interpreters"
        class Libraries(object):
            Libraries = "Topic :: Software Development :: Libraries"
            Application_Frameworks = "Topic :: Software Development :: Libraries :: Application Frameworks"
            Java_Libraries = "Topic :: Software Development :: Libraries :: Java Libraries"
            PHP_Classes = "Topic :: Software Development :: Libraries :: PHP Classes"
            Perl_Modules = "Topic :: Software Development :: Libraries :: Perl Modules"
            Pike_Modules = "Topic :: Software Development :: Libraries :: Pike Modules"
            Python_Modules = "Topic :: Software Development :: Libraries :: Python Modules"
            Ruby_Modules = "Topic :: Software Development :: Libraries :: Ruby Modules"
            Tcl_Extensions = "Topic :: Software Development :: Libraries :: Tcl Extensions"
            pygame = "Topic :: Software Development :: Libraries :: pygame"
        Localization = "Topic :: Software Development :: Localization"
        class Object_Brokering(object):
            Object_Brokering = "Topic :: Software Development :: Object Brokering"
            CORBA = "Topic :: Software Development :: Object Brokering :: CORBA"
        Preprocessors = "Topic :: Software Development :: Pre-processors"
        Quality_Assurance = "Topic :: Software Development :: Quality Assurance"
        class Testing(object):
            Testing = "Topic :: Software Development :: Testing"
            Acceptance = "Topic :: Software Development :: Testing :: Acceptance"
            BDD = "Topic :: Software Development :: Testing :: BDD"
            Mocking = "Topic :: Software Development :: Testing :: Mocking"
            Traffic_Generation = "Topic :: Software Development :: Testing :: Traffic Generation"
            Unit = "Topic :: Software Development :: Testing :: Unit"
        User_Interfaces = "Topic :: Software Development :: User Interfaces"
        class Version_Control(object):
            Version_Control = "Topic :: Software Development :: Version Control"
            Bazaar = "Topic :: Software Development :: Version Control :: Bazaar"
            CVS = "Topic :: Software Development :: Version Control :: CVS"
            Git = "Topic :: Software Development :: Version Control :: Git"
            Mercurial = "Topic :: Software Development :: Version Control :: Mercurial"
            RCS = "Topic :: Software Development :: Version Control :: RCS"
            SCCS = "Topic :: Software Development :: Version Control :: SCCS"
        Widget_Sets = "Topic :: Software Development :: Widget Sets"
    class System(object):
        # Trove classifier constants for the "Topic :: System" subtree;
        # nested classes mirror the " :: " hierarchy.
        System = "Topic :: System"
        class Archiving(object):
            Archiving = "Topic :: System :: Archiving"
            Backup = "Topic :: System :: Archiving :: Backup"
            Compression = "Topic :: System :: Archiving :: Compression"
            Mirroring = "Topic :: System :: Archiving :: Mirroring"
            Packaging = "Topic :: System :: Archiving :: Packaging"
        Benchmark = "Topic :: System :: Benchmark"
        class Boot(object):
            Boot = "Topic :: System :: Boot"
            Init = "Topic :: System :: Boot :: Init"
        Clustering = "Topic :: System :: Clustering"
        Console_Fonts = "Topic :: System :: Console Fonts"
        Distributed_Computing = "Topic :: System :: Distributed Computing"
        Emulators = "Topic :: System :: Emulators"
        Filesystems = "Topic :: System :: Filesystems"
        class Hardware(object):
            Hardware = "Topic :: System :: Hardware"
            Hardware_Drivers = "Topic :: System :: Hardware :: Hardware Drivers"
            Mainframes = "Topic :: System :: Hardware :: Mainframes"
            Symmetric_Multiprocessing = "Topic :: System :: Hardware :: Symmetric Multi-processing"
        Installation_Setup = "Topic :: System :: Installation/Setup"
        Logging = "Topic :: System :: Logging"
        Monitoring = "Topic :: System :: Monitoring"
        class Networking(object):
            Networking = "Topic :: System :: Networking"
            Firewalls = "Topic :: System :: Networking :: Firewalls"
            class Monitoring(object):
                Monitoring = "Topic :: System :: Networking :: Monitoring"
                Hardware_Watchdog = "Topic :: System :: Networking :: Monitoring :: Hardware Watchdog"
            Time_Synchronization = "Topic :: System :: Networking :: Time Synchronization"
        Operating_System = "Topic :: System :: Operating System"
        class Operating_System_Kernels(object):
            Operating_System_Kernels = "Topic :: System :: Operating System Kernels"
            BSD = "Topic :: System :: Operating System Kernels :: BSD"
            GNU_Hurd = "Topic :: System :: Operating System Kernels :: GNU Hurd"
            Linux = "Topic :: System :: Operating System Kernels :: Linux"
        Power_UPS = "Topic :: System :: Power (UPS)"
        Recovery_Tools = "Topic :: System :: Recovery Tools"
        Shells = "Topic :: System :: Shells"
        Software_Distribution = "Topic :: System :: Software Distribution"
        System_Shells = "Topic :: System :: System Shells"
        class Systems_Administration(object):
            Systems_Administration = "Topic :: System :: Systems Administration"
            class Authentication_Directory(object):
                Authentication_Directory = "Topic :: System :: Systems Administration :: Authentication/Directory"
                LDAP = "Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP"
                NIS = "Topic :: System :: Systems Administration :: Authentication/Directory :: NIS"
    class Terminals(object):
        Terminals = "Topic :: Terminals"
        Serial = "Topic :: Terminals :: Serial"
        Telnet = "Topic :: Terminals :: Telnet"
        Terminal_Emulators_X_Terminals = "Topic :: Terminals :: Terminal Emulators/X Terminals"
    class Text_Editors(object):
        # Trove classifier constants for "Topic :: Text Editors".
        Text_Editors = "Topic :: Text Editors"
        Documentation = "Topic :: Text Editors :: Documentation"
        Emacs = "Topic :: Text Editors :: Emacs"
        Integrated_Development_Environments_IDE = "Topic :: Text Editors :: Integrated Development Environments (IDE)"
        Text_Processing = "Topic :: Text Editors :: Text Processing"
        Word_Processors = "Topic :: Text Editors :: Word Processors"
    class Text_Processing(object):
        # Trove classifier constants for "Topic :: Text Processing".
        Text_Processing = "Topic :: Text Processing"
        Filters = "Topic :: Text Processing :: Filters"
        Fonts = "Topic :: Text Processing :: Fonts"
        General = "Topic :: Text Processing :: General"
        Indexing = "Topic :: Text Processing :: Indexing"
        Linguistic = "Topic :: Text Processing :: Linguistic"
        class Markup(object):
            Markup = "Topic :: Text Processing :: Markup"
            HTML = "Topic :: Text Processing :: Markup :: HTML"
            LaTeX = "Topic :: Text Processing :: Markup :: LaTeX"
            Markdown = "Topic :: Text Processing :: Markup :: Markdown"
            SGML = "Topic :: Text Processing :: Markup :: SGML"
            VRML = "Topic :: Text Processing :: Markup :: VRML"
            XML = "Topic :: Text Processing :: Markup :: XML"
            reStructuredText = "Topic :: Text Processing :: Markup :: reStructuredText"
    Utilities = "Topic :: Utilities"
class Typing(object):
    # Top-level "Typing" classifier category (sibling of "Topic").
    Typed = "Typing :: Typed"
| StarcoderdataPython |
337076 | __________________________________________________________________________________________________
sample 44 ms submission
class Solution:
    def checkStraightLine(self, coordinates: List[List[int]]) -> bool:
        """Return True when every point in *coordinates* lies on one line.

        Uses the integer cross-product test, so vertical lines and large
        coordinates are handled without division or float error.
        """
        if len(coordinates) <= 2:
            # One or two points are trivially collinear.
            return True
        (ax, ay), (bx, by) = coordinates[0], coordinates[1]
        dx, dy = bx - ax, by - ay
        # (p - a) x (b - a) == 0  <=>  p lies on the line through a and b.
        return all(dx * (py - ay) == dy * (px - ax) for px, py in coordinates)
__________________________________________________________________________________________________
sample 48 ms submission
class Solution:
    def checkStraightLine(self, coordinates: List[List[int]]) -> bool:
        """Return True when all points lie on a single straight line.

        Bug fix: the original computed float slopes between consecutive
        points and returned False whenever two of them shared an x
        coordinate, so it rejected every vertical line (e.g.
        [[1,1],[1,2],[1,3]]) and was exposed to float rounding.  The
        integer cross-product test below needs no division.
        """
        if len(coordinates) <= 2:
            # One or two points are trivially collinear.
            return True
        x0, y0 = coordinates[0]
        x1, y1 = coordinates[1]
        dx, dy = x1 - x0, y1 - y0
        for x, y in coordinates[2:]:
            # (x, y) is collinear with the first two points iff the cross
            # product of (dx, dy) and (x - x0, y - y0) is zero.
            if dx * (y - y0) != dy * (x - x0):
                return False
        return True
__________________________________________________________________________________________________
| StarcoderdataPython |
# 1700032 | pymatflow/qe/post/scripts/post-qe-scf.py
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import sys
import datetime
import argparse
import matplotlib.pyplot as plt
from pymatflow.qe.post.scf import ScfOut
if __name__ == "__main__":
    # Post-process a Quantum ESPRESSO static SCF run: plot per-step SCF
    # energies and write a markdown summary (parameters, run info, timing).
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--directory", help="directory of static scf running", type=str, default="tmp-qe-static")
    parser.add_argument("-f", "--file", help="output of static scf running: name only without path", type=str, default="static-scf.out")
    args = parser.parse_args()
    os.chdir(args.directory)
    os.system("mkdir -p post-processing")
    os.chdir("post-processing")
    # Parse the pw.x output file (relative to post-processing/).
    scf = ScfOut()
    scf.get_info(file="../%s" % args.file)
    # now in the directory of post-processing, output informations:
    # plot the scf energies
    plt.plot(scf.run_info["scf_energies"])
    plt.title("Total energy per scf step")
    plt.xlabel("Scf step")
    plt.ylabel("Total energy")
    plt.tight_layout()
    plt.savefig("total-energy-per-scf-step.png")
    plt.close()
    with open("scf-info.md", 'w', encoding='utf-8') as fout:
        fout.write("# SCF实验统计\n")
        fout.write("## SCF参数\n")
        for item in scf.scf_params:
            fout.write("- %s: %s\n" % (item, str(scf.scf_params[item])))
        fout.write("## 运行信息\n")
        # calculate the running time and print it out
        # depending on the value of seconds in the time string, there are three situations:
        # when second is smaller than 10 it will be divided from xx:xx x, and when second
        # is larger or equal to 10 it will be together(xx:xx:xx).
        # when all the three xx is smaller than 1 it will be like x: x: x
        # so we have to preprocess it to build the right time string to pass into
        # datetime.datetime.strptime()
        # NOTE(review): the token indices below assume the exact pw.x banner
        # layout ("... starts on  dMonYYYY at HH:MM:SS" with space-padded
        # single digits) — TODO confirm against the QE version in use.
        if len(scf.run_info["start_time"].split()) == 8:
            start_str = scf.run_info["start_time"].split()[5]+"-"+scf.run_info["start_time"].split()[7]
        elif len(scf.run_info["start_time"].split()) == 9:
            start_str = scf.run_info["start_time"].split()[5]+"-"+scf.run_info["start_time"].split()[7]+scf.run_info["start_time"].split()[8]
        elif len(scf.run_info["start_time"].split()) == 10:
            start_str = scf.run_info["start_time"].split()[5]+"-"+scf.run_info["start_time"].split()[7]+scf.run_info["start_time"].split()[8]+scf.run_info["start_time"].split()[9]
        else:
            print("===============================================\n")
            print("                  Warning !!!\n")
            print("===============================================\n")
            print("qe.post.scf.markdown_report:\n")
            print("failed to parse start_time string\n")
            sys.exit(1)
        # Stop banner puts the date after the time, hence the swapped order.
        if len(scf.run_info["stop_time"].split()) == 7:
            stop_str = scf.run_info["stop_time"].split()[6]+"-"+scf.run_info["stop_time"].split()[5]
        elif len(scf.run_info["stop_time"].split()) == 8:
            stop_str = scf.run_info["stop_time"].split()[7]+"-"+scf.run_info["stop_time"].split()[5]+scf.run_info["stop_time"].split()[6]
        elif len(scf.run_info["stop_time"].split()) == 9:
            stop_str = scf.run_info["stop_time"].split()[8]+"-"+scf.run_info["stop_time"].split()[5]+scf.run_info["stop_time"].split()[6]+scf.run_info["stop_time"].split()[7]
        else:
            print("===============================================\n")
            print("                  Warning !!!\n")
            print("===============================================\n")
            print("qe.post.scf.markdown_report:\n")
            print("failed to parse stop_time string\n")
            sys.exit(1)
        start = datetime.datetime.strptime(start_str, "%d%b%Y-%H:%M:%S")
        stop = datetime.datetime.strptime(stop_str, "%d%b%Y-%H:%M:%S")
        delta_t = stop -start
        fout.write("- Time consuming:\n")
        fout.write("  - totally %.1f seconds, or %.3f minutes or %.5f hours\n" % (delta_t.total_seconds(), delta_t.total_seconds()/60, delta_t.total_seconds()/3600))
        # end the time information
        for item in scf.run_info:
            fout.write("- %s: %s\n" % (item, str(scf.run_info[item])))
        fout.write("## 运行信息图示\n")
        fout.write("Total energy per scf step\n")
        fout.write("\n")
    # end of information output
    os.chdir("../")
    os.chdir("../")
    # --------------------------------------------------------------------------
    # print information to the terminal
    # --------------------------------------------------------------------------
    print("=====================================================================\n")
    print("                 post-qe-scf.py\n")
    print("---------------------------------------------------------------------\n")
    print("\n")
| StarcoderdataPython |
# 11224091 | repo: opengauss-mirror/openGauss-OM — file: script/local/KerberosUtility.py
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#-------------------------------------------------------------------------
#
# KerberosUtility.py
# KerberosUtility.py is a utility to handler kerberos things
#
# IDENTIFICATION
# src/manager/om/script/local/KerberosUtility.py
#
#-------------------------------------------------------------------------
import sys
import os
import getopt
import subprocess
import shutil
import pwd
sys.path.append(sys.path[0] + "/../")
from gspylib.common.VersionInfo import VersionInfo
from gspylib.common.DbClusterInfo import initParserXMLFile, dbClusterInfo
from gspylib.common.Common import DefaultValue
from gspylib.common.DbClusterInfo import dbClusterInfo
from gspylib.common.GaussLog import GaussLog
from gspylib.common.ErrorCode import ErrorCode
from gspylib.threads.SshTool import SshTool
from gspylib.os.gsfile import g_file
from multiprocessing.dummy import Pool as ThreadPool
# pg_hba auth method written back when kerberos is uninstalled.
METHOD_TRUST = "trust"
BIGDATA_HOME = "$BIGDATA_HOME"
DUMMY_STANDBY_INSTANCE = 2
INSTANCE_ROLE_COODINATOR = 3
g_ignorepgHbaMiss = True
CONFIG_ITEM_TYPE = "ConfigInstance"
# Cluster topology, loaded from the static config in initGlobals().
g_clusterInfo = None
#Gauss200 IP Hosts Mapping
GAUSS_HOSTS_MAPPING_FLAG = "#%s IP Hosts Mapping" % VersionInfo.PRODUCT_NAME
#write /etc/hosts kerberos flag
KERBEROS_HOSTS_MAPPING_FLAG = "#Kerberos IP Hosts Mapping"
# Client-side env entries stripped from the mpprc file on uninstall.
VALUE_LIST = ["PGKRBSRVNAME", "KRBHOSTNAME", "MPPDB_KRB5_FILE_PATH",
              "KRB5RCACHETYPE"]
# Server-only env entries (KDC side), removed together with their PATH /
# LD_LIBRARY_PATH lines.
SERVER_ENV_LIST = ["KRB_HOME", "KRB5_CONFIG", "KRB5_KDC_PROFILE"]
# Globals initialized by initGlobals().
g_logger = None
g_opts = None
g_sshTool = None
class CmdOptions():
    """
    Container for the command-line options and derived runtime state of
    this kerberos utility (action, user, env file, cluster info, paths).
    """
    def __init__(self):
        self.action = ""            # "install" or "uninstall"
        self.user = ""              # cluster user running the utility
        self.mpprcFile = ""         # path of the env-variable (mpprc) file
        self.clusterInfo = None     # dbClusterInfo of the whole cluster
        self.principal = ""         # kerberos principal from the krb xml
        self.keytab = ""            # keytab file path from the krb xml
        self.dbNodeInfo = None      # node info of the local host
        self.krbHomePath = ""       # KRB_HOME from the krb xml
        self.krbConfigPath = ""     # KRB_CONFIG from the krb xml
        self.server = False         # True when acting as kerberos server
        self.client = False         # True when acting as kerberos client
        self.gausshome = ""         # $GAUSSHOME install dir
        self.gausshome_kerberso = ""  # $GAUSSHOME/kerberos work dir
def initGlobals():
    """
    Initialize module globals: logger, cluster info, ssh tool, and the
    kerberos settings (principal, keytab, KRB paths) parsed from the FI
    krb xml file next to the mpprc file.
    input : NA
    output: NA (exits via g_logger.logExit on failure)
    """
    global g_opts
    logFile = DefaultValue.getOMLogPath(DefaultValue.LOCAL_LOG_FILE,
                                        g_opts.user, "")
    # Init logger
    global g_logger
    g_logger = GaussLog(logFile, "KerberosUtility")
    global g_clusterInfo
    # init for clusterInfo
    g_clusterInfo = dbClusterInfo()
    g_clusterInfo.initFromStaticConfig(g_opts.user)
    g_logger.debug("Cluster information: \n%s." % str(g_clusterInfo))
    global g_sshTool
    nodenames = g_clusterInfo.getClusterNodeNames()
    g_sshTool = SshTool(nodenames)
    try:
        # init for __clusterInfo and __dbNodeInfo
        g_opts.clusterInfo = g_clusterInfo
        hostName = DefaultValue.GetHostIpOrName()
        g_opts.dbNodeInfo = g_clusterInfo.getDbNodeByName(hostName)
        #get env variable file
        g_opts.mpprcFile = DefaultValue.getMpprcFile()
        # create kerberso directory under GAUSSHOME
        gausshome = DefaultValue.getInstallDir(g_opts.user)
        if not gausshome:
            raise Exception(ErrorCode.GAUSS_518["GAUSS_51802"] % "GAUSSHOME")
        g_opts.gausshome = gausshome
        g_opts.gausshome_kerberso = os.path.join(gausshome, "kerberos")
        if not os.path.isdir(g_opts.gausshome_kerberso):
            # 0o700: readable only by the cluster user.
            dir_permission = 0o700
            os.makedirs(g_opts.gausshome_kerberso, mode=dir_permission)
        if g_opts.action == "install" and g_opts.server:
            g_logger.debug("%s the kerberos server.", g_opts.action)
        else:
            if g_opts.action == "uninstall":
                g_logger.debug("%s the kerberos tool.", g_opts.action)
            else:
                g_logger.debug("%s the kerberos client.", g_opts.action)
        # ELK deployments use a different krb xml file name.
        tablespace = DefaultValue.getEnv("ELK_SYSTEM_TABLESPACE")
        if tablespace is not None and tablespace != "":
            xmlfile = os.path.join(os.path.dirname(g_opts.mpprcFile),
                                   DefaultValue.FI_ELK_KRB_XML)
        else:
            xmlfile = os.path.join(os.path.dirname(g_opts.mpprcFile),
                                   DefaultValue.FI_KRB_XML)
        xmlfile = os.path.realpath(xmlfile)
        if not os.path.isfile(xmlfile):
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % xmlfile)
        # Pull principal/keytab/KRB paths out of the xml <property> list.
        rootNode = initParserXMLFile(xmlfile)
        elementArray = rootNode.findall("property")
        for element in elementArray:
            if (element.find('name').text == "mppdb.kerberos.principal"
                    or element.find('name').text == "elk.kerberos.principal"):
                g_opts.principal = element.find('value').text
            if (element.find('name').text == "mppdb.kerberos.keytab"
                    or element.find('name').text == "elk.kerberos.keytab"):
                g_opts.keytab = element.find('value').text
            if (element.find('name').text == 'KRB_HOME'):
                g_opts.krbHomePath = element.find('value').text
            if (element.find('name').text == 'KRB_CONFIG'):
                g_opts.krbConfigPath = element.find('value').text
        if(g_opts.principal == "" or g_opts.keytab == ""
                or g_opts.krbHomePath == "" or g_opts.krbConfigPath == ""):
            raise Exception(ErrorCode.GAUSS_512["GAUSS_51200"] %
                            "mppdb.kerberos.principal or "
                            "mppdb.kerberos.keytab"
                            " or KRB_HOME or KRB_CONFIG"
                            + " The xml file is %s." % xmlfile)
    except Exception as e:
        g_logger.logExit(str(e))
    g_logger.debug("Instance information on local node:\n%s."
                   % str(g_opts.dbNodeInfo))
class Kerberos():
    """
    Installs/uninstalls kerberos authentication for the cluster:
    env variables, pg_hba entries, postgresql GUCs, and (server side)
    the KDC service itself.
    """
    def __init__(self):
        self.__dbNodeInfo = None    # node info of the local host
        self.__allIps = []          # every cluster IP that gets an hba entry
        self.__cooConfig = {}       # GUCs to apply on coordinators
        self.__dataConfig = {}      # GUCs to apply on datanodes
        self.__gtmConfig = {}       # GUCs to apply on GTM instances
        self.__cmsConfig = {}       # GUCs to apply on CM servers
        self.__IpStringList = []    # rendered "-h 'host ...'" hba entries
        self.__DNStringList = []    # rendered hba entries for DN replication
    def __rollback(self, isServer):
        # Undo partially-applied changes after a failed command: a server
        # install has its own dedicated rollback; anything else re-runs
        # uninstall in rollback mode.
        g_logger.log("An error happened in executing the command, "
                     "begin rollback work...")
        if isServer:
            self.__rollbackServerInstall()
        else:
            self.__uninstall(True)
        g_logger.log("rollback work complete.")
    def __rollbackServerInstall(self):
        # Revert a server-side install: drop $GAUSSHOME/kerberos, strip the
        # env entries (server ones included), then tear down the auth config
        # and the KDC server pieces.
        if os.path.isdir(g_opts.gausshome_kerberso):
            shutil.rmtree(g_opts.gausshome_kerberso)
        self.__clearEnvironmentVariableValue(True)
        self.__cleanAuthConfig()
        self.__cleanServer()
    def __triggerJob(self, isUninstall, isServer=False):
        '''
        function: triggerJob for call kinit
        install: server side initializes the user, starts the KDC,
        distributes key/site files and sets the service cron; client side
        runs the ticket job.  uninstall: cancel the cron entry.
        '''
        if(not isUninstall):
            g_logger.log("start triggerJob.")
            if isServer:
                self.__initUser()
                self.__startServer()
                self.__distributeKeyAndSite()
                self.__setServiceCron()
            else:
                self.__executeJob()
            g_logger.log("successfully start triggerJob.")
        else:
            g_logger.log("stop triggerJob.")
            self.__cancelCron()
            g_logger.log("successfully stop triggerJob.")
def __clearEnvironmentVariableValue(self, isServer=False):
"""
function: clear kerberos EnvironmentVariable
input: isServer
output: NA
"""
for value in VALUE_LIST:
cmd = "sed -i -e '/^.*%s=/d' '%s'" % (value, g_opts.mpprcFile)
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd
+ "Error:\n%s" % output)
if isServer:
for value in SERVER_ENV_LIST:
cmd = "sed -i -e '/^.*%s=/d' '%s' && " \
"sed -i -e '/^.*PATH=\$%s/d' '%s' && " \
"sed -i -e '/^.*LD_LIBRARY_PATH=\$%s/d' '%s'" % \
(value, g_opts.mpprcFile, value, g_opts.mpprcFile,
value, g_opts.mpprcFile)
(status, output) = subprocess.getstatusoutput(cmd)
if(status != 0):
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd
+ "Error:\n%s" % output)
g_logger.log("successfully clear kerberos env Variables.")
    def __setUserEnvVariable(self, isUninstall, isServer=False,
                             isRollBack=False):
        '''
        function: set user env Variable
        install: append the kerberos exports to the mpprc file (server side
        writes KRB_HOME/KRB5_CONFIG/KRB5_KDC_PROFILE and PATH entries;
        client side writes MPPDB_KRB5_FILE_PATH/PGKRBSRVNAME/KRBHOSTNAME/
        KRB5RCACHETYPE derived from the principal).
        uninstall: remove them again; on rollback only the client entries
        are removed.
        '''
        g_logger.log("start set user env Variable.")
        if(not isUninstall):
            try:
                if isServer:
                    # Start from a clean slate, then append each export and
                    # verify every echo succeeded.
                    self.__clearEnvironmentVariableValue(True)
                    # SET variable KRB_HOME
                    cmd = "echo \"export KRB_HOME=%s\" >> %s" % \
                          (g_opts.gausshome, g_opts.mpprcFile)
                    (status, output) = subprocess.getstatusoutput(cmd)
                    if (status != 0):
                        raise Exception(ErrorCode.GAUSS_518["GAUSS_51804"] %
                                        "KRB_HOME" + output
                                        + "\nThe cmd is %s " % cmd)
                    g_logger.log("Config environment variable KRB_HOME "
                                 "successfully.")
                    # SET variable KRB5_CONFIG
                    cmd = "echo \"export KRB5_CONFIG=%s/krb5.conf\" >> %s" % \
                          (g_opts.gausshome_kerberso, g_opts.mpprcFile)
                    (status, output) = subprocess.getstatusoutput(cmd)
                    if (status != 0):
                        raise Exception(ErrorCode.GAUSS_518["GAUSS_51804"] %
                                        "KRB5_CONFIG" + output
                                        + "\nThe cmd is %s " % cmd)
                    g_logger.log("Config environment variable KRB5_CONFIG "
                                 "successfully.")
                    # SET variable KRB5_KDC_PROFILE
                    cmd = "echo \"export KRB5_KDC_PROFILE=%s/kdc.conf\" " \
                          ">> %s" \
                          % (g_opts.gausshome_kerberso, g_opts.mpprcFile)
                    (status, output) = subprocess.getstatusoutput(cmd)
                    if (status != 0):
                        raise Exception(ErrorCode.GAUSS_518["GAUSS_51804"] %
                                        "KRB5_KDC_PROFILE" + output
                                        + "\nThe cmd is %s " % cmd)
                    g_logger.log("Config environment "
                                 "variable KRB5_KDC_PROFILE successfully.")
                    # SET variable PATH
                    cmd = "echo \"export PATH=\$KRB_HOME/bin:\$PATH\" " \
                          ">> %s" % (g_opts.mpprcFile)
                    (status, output) = subprocess.getstatusoutput(cmd)
                    if (status != 0):
                        raise Exception(ErrorCode.GAUSS_518["GAUSS_51804"] %
                                        "PATH" + output
                                        + "\nThe cmd is %s " % cmd)
                    g_logger.log("Config environment variable PATH "
                                 "successfully.")
                    # SET variable LD_LIBRARY_PATH
                    cmd = "echo \"export LD_LIBRARY_PATH=\$KRB_HOME/lib:" \
                          "\$LD_LIBRARY_PATH\" >> %s" % (g_opts.mpprcFile)
                    (status, output) = subprocess.getstatusoutput(cmd)
                    if (status != 0):
                        raise Exception(ErrorCode.GAUSS_518["GAUSS_51804"] %
                                        "LD_LIBRARY_PATH" + output
                                        + "\nThe cmd is %s " % cmd)
                    g_logger.log("Config environment variable LD_LIBRARY_PATH "
                                 "successfully.")
                else:
                    # get principal; expected form "service/host@REALM".
                    principals = g_opts.principal.split("/")
                    if (len(g_opts.principal.split('/')) < 2):
                        raise Exception(ErrorCode.GAUSS_500["GAUSS_50009"]
                                        + "principal: %s" % g_opts.principal)
                    address = g_opts.principal.split('/')[1].split('@')[0]
                    self.__clearEnvironmentVariableValue()
                    # SET variable KRB5_CONFIG
                    cmd = "echo \"export MPPDB_KRB5_FILE_PATH=%s/krb5.conf\"" \
                          " >> %s" % (os.path.dirname(g_opts.mpprcFile),
                                      g_opts.mpprcFile)
                    (status, output) = subprocess.getstatusoutput(cmd)
                    if (status != 0):
                        raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"]
                                        % cmd + "Error:\n%s." % output)
                    g_logger.log("Config environment variable KRB5_CONFIG "
                                 "successfully.")
                    # SET variable PGKRBSRVNAME
                    cmd = "echo \"export PGKRBSRVNAME=%s\" >>%s" % \
                          (principals[0], g_opts.mpprcFile)
                    (status, output) = subprocess.getstatusoutput(cmd)
                    if (status != 0):
                        raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"]
                                        % cmd + "Error:\n%s." % output)
                    g_logger.log("Config environment variable PGKRBSRVNAME "
                                 "successfully.")
                    # SET variable KRBHOSTNAME
                    cmd = "echo \"export KRBHOSTNAME=%s\" >>%s" % \
                          (address, g_opts.mpprcFile)
                    (status, output) = subprocess.getstatusoutput(cmd)
                    if status != 0:
                        raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"]
                                        % cmd + "Error:\n%s." % output)
                    g_logger.log("Config environment variable KRBHOSTNAME "
                                 "successfully.")
                    # SET variable KRB5RCACHETYPE
                    cmd = "echo \"export KRB5RCACHETYPE=none\" >>%s" % \
                          (g_opts.mpprcFile)
                    (status, output) = subprocess.getstatusoutput(cmd)
                    if status != 0:
                        raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"]
                                        % cmd + "Error:\n%s." % output)
                    g_logger.log("Config environment variable KRB5RCACHETYPE "
                                 "successfully.")
            except Exception as e:
                raise Exception(ErrorCode.GAUSS_518["GAUSS_51804"]
                                % "" + "Error:%s." % str(e))
        else:
            # Uninstall path: a rollback keeps the server entries in place.
            if isRollBack:
                self.__clearEnvironmentVariableValue(False)
            else:
                self.__clearEnvironmentVariableValue(True)
    def __configPostgresql(self, isUninstall):
        '''
        function: set config postgresql file
        install: point every local instance at the keytab and switch GTM/CM
        auth to gss; uninstall: reset those GUCs back to trust/empty.
        '''
        g_logger.log("start set config postgresql file")
        instanceList = []
        if(not isUninstall):
            self.__cooConfig["krb_server_keyfile"] = "'" + g_opts.keytab + "'"
            self.__dataConfig["krb_server_keyfile"] = "'" + g_opts.keytab + "'"
            self.__gtmConfig["gtm_authentication_type"] = "gss"
            self.__gtmConfig["gtm_krb_server_keyfile"] = "'" + g_opts.keytab \
                                                         + "'"
            self.__cmsConfig["cm_auth_method"] = "gss"
            self.__cmsConfig["cm_krb_server_keyfile"] = "'" + g_opts.keytab + \
                                                        "'"
        else:
            self.__cooConfig["krb_server_keyfile"] = ""
            self.__dataConfig["krb_server_keyfile"] = ""
            self.__gtmConfig["gtm_authentication_type"] = "trust"
            self.__gtmConfig["gtm_krb_server_keyfile"] = "''"
            self.__cmsConfig["cm_auth_method"] = "trust"
            self.__cmsConfig["cm_krb_server_keyfile"] = "''"
        # get coordinators instance
        for cooInst in g_opts.dbNodeInfo.coordinators:
            instanceList.append(cooInst)
        # get datanode instance
        for dnInst in g_opts.dbNodeInfo.datanodes:
            instanceList.append(dnInst)
        # get gtm instance
        for gtmInst in g_opts.dbNodeInfo.gtms:
            instanceList.append(gtmInst)
        # get cms instance
        for cmsInst in g_opts.dbNodeInfo.cmservers:
            instanceList.append(cmsInst)
        if(len(instanceList) == 0):
            return
        try:
            #config instance in paralle
            pool = ThreadPool(DefaultValue.getCpuSet())
            pool.map(self.__configInst, instanceList)
            pool.close()
            pool.join()
        except Exception as e:
            raise Exception(str(e))
        g_logger.log("successfully set config postgresql file")
    def __configPgHba(self, isUninstall):
        '''
        set pg_hba.conf file
        install: add a gss entry for every cluster IP; uninstall: reset
        those IPs back to the trust method.
        '''
        g_logger.log("start config pg_hba file")
        try:
            # get current node information
            hostName = DefaultValue.GetHostIpOrName()
            self.__dbNodeInfo = g_clusterInfo.getDbNodeByName(hostName)
            if (self.__dbNodeInfo is None):
                raise Exception(ErrorCode.GAUSS_516["GAUSS_51620"] % "local"
                                + " There is no host named %s." % hostName)
            #getall node names
            nodenames = g_clusterInfo.getClusterNodeNames()
            # Collect every back/ssh/HA/listen IP of every instance type.
            for nodename in nodenames:
                nodeinfo = g_clusterInfo.getDbNodeByName(nodename)
                self.__allIps += nodeinfo.backIps
                self.__allIps += nodeinfo.sshIps
                for inst in nodeinfo.cmservers:
                    self.__allIps += inst.haIps
                    self.__allIps += inst.listenIps
                for inst in nodeinfo.coordinators:
                    self.__allIps += inst.haIps
                    self.__allIps += inst.listenIps
                for inst in nodeinfo.datanodes:
                    self.__allIps += inst.haIps
                    self.__allIps += inst.listenIps
                for inst in nodeinfo.gtms:
                    self.__allIps += inst.haIps
                    self.__allIps += inst.listenIps
            # set local ip 127.0.0.1
            self.__allIps += ['127.0.0.1']
            # get all ips. Remove the duplicates ips
            self.__allIps = DefaultValue.Deduplication(self.__allIps)
            # build ip string list
            #set Kerberos ip
            # principal has the form "service/host@REALM"; keep the realm.
            principals = g_opts.principal.split("/")
            principals = principals[1].split("@")
            # Every 1000 records merged into one"
            # NOTE(review): 'j' is incremented but never used — the batching
            # described above is NOT implemented; every IP lands in the one
            # ipstring appended below.  Confirm whether batching is needed.
            ipstring = ""
            j = 0
            for ip in self.__allIps:
                j += 1
                if not isUninstall:
                    ipstring += " -h 'host all all " \
                                " %s/32 gss " \
                                "include_realm=1 krb_realm=%s'" % \
                                (ip, principals[1])
                else:
                    ipstring += " -h 'host all all " \
                                " %s/32 %s'" % (ip, METHOD_TRUST)
            if ipstring != "":
                self.__IpStringList.append(ipstring)
            #write config hba
            self.__writeConfigHba()
        except Exception as e:
            raise Exception(ErrorCode.GAUSS_530["GAUSS_53024"]
                            + "Error:%s." % str(e))
        g_logger.debug("Instance information about local node:\n%s." %
                       str(self.__dbNodeInfo))
        g_logger.log("successfully config pg_hba file")
    def __configDNPgHba(self, isUninstall):
        '''
        set DN pg_hba.conf file for replication channel
        install: allow local (::1) replication for the cluster user via
        gss; uninstall: reset it back to trust.
        '''
        g_logger.log("start config pg_hba file for database node replication "
                     "channel")
        try:
            # principal has the form "service/host@REALM"; keep the realm.
            principals = g_opts.principal.split("/")
            principals = principals[1].split("@")
            ipstring = ""
            if (not isUninstall):
                ipstring += " -h 'host replication %s " \
                            " ::1/128 gss include_realm=1" \
                            " krb_realm=%s'" % \
                            (g_opts.user, principals[1])
            else:
                ipstring += " -h 'host replication %s " \
                            " ::1/128 %s'" % \
                            (g_opts.user, METHOD_TRUST)
            if (ipstring != ""):
                self.__DNStringList.append(ipstring)
            self.__writeDNConfigHba()
        except Exception as e:
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50205"]
                            % ("database node config for pg_hba.conf. %s"
                               % str(e)))
        g_logger.log("successfully config pg_hba file for database node "
                     "replication channel")
    def __configInst(self, dbInst):
        """
        function: Modify a parameter of postgresql.conf
        input : dbInst - one local instance (CN/DN/GTM/CM server); the GUC
                dict chosen matches its role (set by __configPostgresql)
        output: NA
        """
        configFile = os.path.join(dbInst.datadir, "postgresql.conf")
        # GTM and CM server keep their config elsewhere; for CN/DN a missing
        # postgresql.conf means there is nothing to configure here.
        if (dbInst.instanceRole != DefaultValue.INSTANCE_ROLE_GTM and
                dbInst.instanceRole != DefaultValue.INSTANCE_ROLE_CMSERVER and
                not os.path.isfile(configFile)):
            return
        # NOTE(review): the CN branch propagates the original exception while
        # the other branches re-wrap it as a bare Exception(str(e)); the
        # wrapping loses the exception type — confirm whether intentional.
        if dbInst.instanceRole == DefaultValue.INSTANCE_ROLE_COODINATOR:
            # modifying CN configuration.
            g_logger.log("Modify CN %s configuration." % dbInst.instanceId)
            configFile = os.path.join(dbInst.datadir, "postgresql.conf")
            # Set default value for each inst
            tempCommonDict = self.__cooConfig
            self.__setConfigItem(DefaultValue.INSTANCE_ROLE_COODINATOR,
                                 dbInst.datadir, tempCommonDict)
        if dbInst.instanceRole == DefaultValue.INSTANCE_ROLE_DATANODE:
            # modifying database node configuration.
            g_logger.log("Modify database node %s configuration."
                         % dbInst.instanceId)
            # Set default value for each inst
            tempCommonDict = self.__dataConfig
            try:
                self.__setConfigItem(DefaultValue.INSTANCE_ROLE_DATANODE,
                                     dbInst.datadir, tempCommonDict)
            except Exception as e:
                raise Exception(str(e))
        if dbInst.instanceRole == DefaultValue.INSTANCE_ROLE_GTM:
            # modifying GTM configuration.
            g_logger.log("Modify GTM %s configuration." % dbInst.instanceId)
            # Set default value for each inst
            tempCommonDict = self.__gtmConfig
            try:
                self.__setConfigItem(DefaultValue.INSTANCE_ROLE_GTM,
                                     dbInst.datadir, tempCommonDict)
            except Exception as e:
                raise Exception(str(e))
        if dbInst.instanceRole == DefaultValue.INSTANCE_ROLE_CMSERVER:
            # modifying CMSERVER configuration.
            g_logger.log("Modify CMserver %s configuration."
                         % dbInst.instanceId)
            # Set default value for each inst
            tempCommonDict = self.__cmsConfig
            try:
                self.__setConfigItem(DefaultValue.INSTANCE_ROLE_CMSERVER,
                                     dbInst.datadir, tempCommonDict)
            except Exception as e:
                raise Exception(str(e))
def __setConfigItem(self, typename, datadir, parmeterDict):
    """
    function: Apply a dict of GUC parameters to one instance via gs_guc.
    input : typename - instance role constant (CN/DN/GTM/CM server)
            datadir - instance data directory
            parmeterDict - {guc_name: value}; an empty value sets the
                           GUC to the empty-string literal ''
    output: NA
    """
    # build GUC parameter string
    gucstr = ""
    for entry in list(parmeterDict.items()):
        if entry[1] == "":
            gucstr += " -c \"%s=\'\'\"" % (entry[0])
        else:
            gucstr += " -c \"%s=%s\"" % (entry[0], entry[1])
    # check the GUC parameter string
    if gucstr == "":
        return
    if typename == DefaultValue.INSTANCE_ROLE_DATANODE or \
            typename == DefaultValue.INSTANCE_ROLE_COODINATOR:
        cmd = "source '%s'; gs_guc set -D %s %s" % \
              (g_opts.mpprcFile, datadir, gucstr)
        DefaultValue.retry_gs_guc(cmd)
        # when a gaussdb process is running, reload too so the new
        # settings take effect without a restart
        if self.__gsdbStatus():
            cmd = "source '%s'; gs_guc reload -D %s %s" % \
                  (g_opts.mpprcFile, datadir, gucstr)
            try:
                DefaultValue.retry_gs_guc(cmd)
            except Exception as e:
                raise Exception(str(e))
    else:
        # GTM / CM server: apply to all instances on all nodes
        cmd = "source '%s'; gs_guc set -N all -I all %s" % \
              (g_opts.mpprcFile, gucstr)
        DefaultValue.retry_gs_guc(cmd)
def __gsdbStatus(self):
    """
    function: check whether a gaussdb process of this installation runs
    input: NA
    output: True if a matching process was found, False otherwise
    """
    # raw string: '\<' and '\>' are grep word-boundary markers, not
    # Python escapes; the previous non-raw literal triggered an
    # invalid-escape-sequence warning on modern Python while producing
    # the same bytes, so the command itself is unchanged
    cmd = r"ps ux | grep -v '\<grep\>' | grep '%s/bin/gaussdb'" % \
        g_clusterInfo.appPath
    (status, output) = subprocess.getstatusoutput(cmd)
    # grep exits non-zero when nothing matches; only treat that as an
    # error when it also printed something (a real failure message)
    if status != 0 and output:
        raise Exception("Get gaussdb process failed." +
                        "The cmd is %s " % cmd)
    if output:
        return True
    return False
def __writeConfigHba(self):
    """
    function: add trusted-host entries to pg_hba.conf of every local
              CN and DN instance, configuring instances in parallel
    input : NA
    output: NA
    """
    instances = self.__dbNodeInfo.datanodes + \
        self.__dbNodeInfo.coordinators
    # Determine whether this node containing CN, DN instance
    if (len(instances) == 0):
        g_logger.debug("The count number of coordinator and "
                       "datanode on local node is zero.")
        return
    try:
        # one worker per CPU; each instance is configured independently
        pool = ThreadPool(DefaultValue.getCpuSet())
        pool.map(self.__configAnInstance, instances)
        pool.close()
        pool.join()
    except Exception as e:
        raise Exception(str(e))
def __writeDNConfigHba(self):
    """
    function: add replication-channel host entries to pg_hba.conf of
              every local DN instance, configuring instances in parallel
    input : NA
    output: NA
    """
    instances = self.__dbNodeInfo.datanodes
    if (len(instances) == 0):
        g_logger.debug("The count number of datanode "
                       "on local node is zero.")
        return
    try:
        # one worker per CPU; each instance is configured independently
        pool = ThreadPool(DefaultValue.getCpuSet())
        pool.map(self.__configAnInstanceHA, instances)
        pool.close()
        pool.join()
    except Exception as e:
        raise Exception(str(e))
def __configAnInstance(self, instance):
    """Add trusted hosts to pg_hba.conf of a single CN/DN instance.

    Missing data directories or pg_hba.conf files are tolerated (debug
    log only) when g_ignorepgHbaMiss is set; otherwise a missing data
    directory is a hard error.
    """
    dataDir = instance.datadir
    # validate the instance data directory first
    if dataDir == "" or not os.path.isdir(dataDir):
        if not g_ignorepgHbaMiss:
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50219"] %
                            ("data directory of the instance[%s]" %
                             str(instance)))
        g_logger.debug("Failed to obtain data directory of "
                       "the instance[%s]." % str(instance))
        return
    # tolerate a missing pg_hba.conf when asked to
    hbaFile = "%s/pg_hba.conf" % dataDir
    if g_ignorepgHbaMiss and not os.path.isfile(hbaFile):
        g_logger.debug("The %s does not exist." % hbaFile)
        return
    # delegate the actual gs_guc calls
    self.__addHostToFile(dataDir)
def __configAnInstanceHA(self, instance):
    """Add replication-channel hosts to pg_hba.conf of one DN instance.

    Mirrors __configAnInstance but writes the DN replication entries.
    """
    instanceRole = "datanode"
    dataDir = instance.datadir
    # validate the instance data directory first
    if dataDir == "" or not os.path.isdir(dataDir):
        if not g_ignorepgHbaMiss:
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50219"] %
                            ("data directory of the instance[%s]"
                             % str(instance)))
        g_logger.debug("Failed to obtain data directory "
                       "of the instance[%s]." % str(instance))
        return
    # tolerate a missing pg_hba.conf when asked to
    hbaFile = "%s/pg_hba.conf" % dataDir
    if g_ignorepgHbaMiss and not os.path.isfile(hbaFile):
        g_logger.debug("The %s does not exist." % hbaFile)
        return
    # delegate the actual gs_guc calls
    self.__addDNhostToFile(instanceRole, dataDir)
def __addHostToFile(self, instanceDataPath):
    """Append each prepared host entry to pg_hba.conf via gs_guc set."""
    for hostEntry in self.__IpStringList:
        gucCmd = "source '%s';gs_guc set -D %s %s" % (
            g_opts.mpprcFile, instanceDataPath, hostEntry)
        DefaultValue.retry_gs_guc(gucCmd)
def __addDNhostToFile(self, dbInstanceRole, instanceDataPath):
    """Append DN replication-channel host entries to pg_hba.conf.

    Only acts on instances whose role is "datanode"; any other role is
    a no-op (same behaviour as before, expressed as a guard clause).
    """
    if dbInstanceRole != "datanode":
        return
    for replEntry in self.__DNStringList:
        gucCmd = "source '%s';gs_guc set -D %s %s" % (
            g_opts.mpprcFile, instanceDataPath, replEntry)
        DefaultValue.retry_gs_guc(gucCmd)
def __executeJob(self):
    """
    function: obtain a Kerberos TGT via the kinit tool, then install
              the cron job that keeps the ticket fresh
    input : NA
    output: NA
    """
    try:
        kinitPath = "%s/bin/kinit" % g_opts.krbHomePath
        # kinit needs the bundled libraries and the cluster krb5 config
        kcmd = 'export LD_LIBRARY_PATH=%s/lib:$LD_LIBRARY_PATH;' \
               'export KRB5_CONFIG=$MPPDB_KRB5_FILE_PATH;%s -k -t %s %s' \
               % (g_opts.krbHomePath, kinitPath,
                  g_opts.keytab, g_opts.principal)
        cmd = 'source %s; %s' % (g_opts.mpprcFile, kcmd)
        (status, output) = DefaultValue.retryGetstatusoutput(cmd)
        if(status != 0):
            raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd
                            + " Output: \n%s" % str(output))
        g_logger.debug("Get ticket successfully.")
        #set cron
        self.__setCron()
    except Exception as e:
        # NOTE(review): 'cmd' would be unbound here if an exception were
        # raised before its assignment above -- confirm that cannot happen
        raise Exception("Call TGT from kinit's tool: %s." % cmd +
                        " Exception: \n%s" % str(e))
def __setCron(self):
    """
    function: Set linux cron
    input : NA
    output: NA
    """
    g_logger.log("Set CRON.")
    # stage the new crontab in a per-process temp file
    cronFile = "%s/gauss_cron_%d" % \
               (DefaultValue.getTmpDirFromEnv(g_opts.user), os.getpid())
    setCronCmd = "crontab -l > %s && " % cronFile
    # drop any previously installed kinit entry before appending ours
    setCronCmd += "sed -i '/^.*kinit.*$/d' '%s'; " % cronFile
    setCronCmd += '''echo '*/1 * * * * source '%s';''' \
                  '''export LD_LIBRARY_PATH='%s'/lib:$LD_LIBRARY_PATH;''' \
                  '''export KRB5_CONFIG=$MPPDB_KRB5_FILE_PATH ''' % \
                  (g_opts.mpprcFile, g_opts.krbHomePath)
    setCronCmd += '''klistcmd="'%s'/bin/klist";''' % (g_opts.krbHomePath)
    setCronCmd += '''kinitcmd="'%s'/bin/kinit -k -t %s %s ";''' % \
                  (g_opts.krbHomePath, g_opts.keytab, g_opts.principal)
    setCronCmd += '''klistresult=`$klistcmd>>/dev/null 2>&1;echo $?`;'''
    # the cron entry re-kinits when no ticket exists, when the ticket
    # expires within 300s, or when its start time lies in the future
    setCronCmd += '''if [ $klistresult -ne 0 ];then `$kinitcmd`;else ''' \
                  '''expiresTime=`$klistcmd|grep krbtgt|awk -F " " ''' \
                  '''"{print \\\\\\$2}"`;startTime=`$klistcmd|grep ''' \
                  '''krbtgt|awk -F " " "{print \\\\\\$1}"`;''' \
                  '''currentTime=`date +\%%s`;currentTime=''' \
                  '''`date +\%%s`;if [ $[`date -d "$expiresTime"''' \
                  ''' +\%%s`-$currentTime] -le 300 ] || [ $[`date -d''' \
                  ''' "$startTime" +\%%s`-$currentTime] -ge 0 ];''' \
                  '''then `$kinitcmd`;fi;fi;>>/dev/null 2>&1''' \
                  '''& ' >> %s ;''' % (cronFile)
    setCronCmd += "crontab %s&&" % cronFile
    setCronCmd += "rm -f '%s'" % cronFile
    g_logger.debug("Command for setting CRON: %s" % setCronCmd)
    (status, output) = subprocess.getstatusoutput(setCronCmd)
    if(status != 0):
        raise Exception(ErrorCode.GAUSS_508["GAUSS_50801"]
                        + " Error: \n%s." % str(output)
                        + "The cmd is %s " % setCronCmd)
    # run the same refresh logic once right now so a valid ticket
    # exists before the first cron tick fires
    # NOTE(review): currentTime is assigned twice in a row both here and
    # in the cron entry above -- looks like harmless copy/paste duplication
    cmd = "source %s;export LD_LIBRARY_PATH=%s/lib:$LD_LIBRARY_PATH;" \
          "export KRB5_CONFIG=$MPPDB_KRB5_FILE_PATH;" % \
          (g_opts.mpprcFile, g_opts.krbHomePath)
    cmd += "klistcmd='%s/bin/klist';" % (g_opts.krbHomePath)
    cmd += "kinitcmd='%s/bin/kinit -k -t %s %s';" % \
           (g_opts.krbHomePath, g_opts.keytab, g_opts.principal)
    cmd += "klistresult=`$klistcmd>>/dev/null 2>&1;echo $?`;"
    cmd += "if [ $klistresult -ne 0 ];then `$kinitcmd`;" \
           "else expiresTime=`$klistcmd|grep krbtgt|" \
           "awk -F ' ' '{print \$2}'`;" \
           "startTime=`$klistcmd|grep krbtgt|awk -F ' ' '{print \$1}'`;" \
           "currentTime=`date +\%s`;currentTime=`date +\%s`;" \
           "if [ $[`date -d $expiresTime +\%s`-$currentTime] -le 300 ] " \
           "|| [ $[`date -d $startTime +\%s`-$currentTime] -ge 0 ];" \
           "then `$kinitcmd`;fi;fi;>>/dev/null 2>&1"
    (status, output) = subprocess.getstatusoutput(cmd)
    if(status != 0):
        raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
                        "Error:\n%s" % str(output))
    g_logger.log("Successfully Set CRON.")
def __setServiceCron(self):
    """
    function: install a cron entry that restarts krb5kdc if it dies
    input : NA
    output: NA
    """
    g_logger.log("Set CRON.")
    # stage the new crontab in a per-process temp file
    cronFile = "%s/gauss_cron_%d" % \
               (DefaultValue.getTmpDirFromEnv(g_opts.user), os.getpid())
    setCronCmd = "crontab -l > %s && " % cronFile
    # drop any previously installed krb5kdc entry before appending ours
    setCronCmd += "sed -i '/^.*krb5kdc.*$/d' '%s'; " % cronFile
    # every minute: start krb5kdc when no such process is found
    setCronCmd += '''echo "*/1 * * * * source %s; ''' \
                  '''kdc_pid_list=\`ps ux | grep -E krb5kdc| ''' \
                  '''grep -v grep | awk '{print \\\\\\$2}'\` && ''' \
                  '''(if [ X\"\$kdc_pid_list\" == X\"\" ]; ''' \
                  '''then krb5kdc; fi) " >> %s; ''' % \
                  (g_opts.mpprcFile, cronFile)
    setCronCmd += "crontab %s && " % cronFile
    setCronCmd += "rm -f '%s'" % cronFile
    g_logger.debug("Command for setting CRON: %s" % setCronCmd)
    (status, output) = subprocess.getstatusoutput(setCronCmd)
    if(status != 0):
        raise Exception(ErrorCode.GAUSS_508["GAUSS_50801"]
                        + " Error: \n%s." % str(output)
                        + "The cmd is %s " % setCronCmd)
    g_logger.log("Successfully Set CRON.")
def __cancelCron(self):
    """
    function: clean kerberos_monitor process and delete cron
    input : NA
    output: NA
    """
    g_logger.log("Deleting kerberos monitor.")
    try:
        # Remove cron
        crontabFile = "%s/gauss_crontab_file_%d" % \
                      (DefaultValue.getTmpDirFromEnv(g_opts.user),
                       os.getpid())
        cmd = "crontab -l > %s; " % crontabFile
        cmd += "sed -i '/^.*kinit.*$/d' '%s'; " % crontabFile
        cmd += "crontab '%s';" % crontabFile
        cmd += "rm -f '%s'" % crontabFile
        (status, output) = subprocess.getstatusoutput(cmd)
        if (status != 0):
            #no need raise error here, user can do it manually.
            g_logger.debug("Failed to delete regular tasks. Error: \n%s"
                           " You can do it manually." % str(output))
            g_logger.debug("The cmd is %s " % cmd)
        # destroy any cached ticket with kdestroy (also best effort)
        cmd = "source '%s';export LD_LIBRARY_PATH=%s/lib:" \
              "$LD_LIBRARY_PATH;export KRB5_CONFIG=" \
              "$MPPDB_KRB5_FILE_PATH;%s/bin/kdestroy" % \
              (g_opts.mpprcFile, g_opts.krbHomePath, g_opts.krbHomePath)
        (status, output) = DefaultValue.retryGetstatusoutput(cmd)
        if (status != 0):
            g_logger.debug("Failed to delete ticket. Error: \n%s" %
                           str(output))
            g_logger.debug("The cmd is %s " % cmd)
    except Exception as e:
        raise Exception(str(e))
    g_logger.log("Successfully deleted kerberos OMMonitor.")
def __copyConf(self, src_dir, dest_dir, file_list):
    """Copy each named server config file from src_dir into dest_dir.

    Raises when a source file is missing or a copy fails.
    """
    for name in file_list:
        source = os.path.realpath(os.path.join(src_dir, name))
        if not os.path.isfile(source):
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % source)
        target = os.path.realpath(os.path.join(dest_dir, name))
        try:
            shutil.copy(source, target)
        except Exception:
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50214"] % source)
    g_logger.log("Copy server config files successfully.")
def __initKadm5Conf(self, dest_dir):
    """Fill the #realms# placeholder in kadm5.acl with HUAWEI.COM."""
    acl_path = os.path.realpath(os.path.join(dest_dir, "kadm5.acl"))
    sed_cmd = "sed -i 's/#realms#/HUAWEI.COM/g' %s" % acl_path
    status, output = subprocess.getstatusoutput(sed_cmd)
    if status != 0:
        g_logger.debug("The cmd is %s " % sed_cmd)
        raise Exception(ErrorCode.GAUSS_502["GAUSS_50205"] % acl_path
                        + output)
    g_logger.log("Initialize \"kadm5.acl\" successfully.")
def __initKrb5Conf(self, dest_dir, dest_file='krb5.conf'):
    """
    function: fill the placeholders (KDC ip/port, config dir, log dir)
              in krb5.conf (or another file via dest_file) and make
              sure the kerberos log directory exists
    input : dest_dir - directory containing the file to initialize
            dest_file - file name, defaults to krb5.conf
    output: NA
    """
    krb5_file = os.path.realpath(os.path.join(dest_dir, dest_file))
    # KDC listens on the node's first back IP, fixed port 21732
    kdc_ip = g_opts.dbNodeInfo.backIps[0]
    kdc_port = 21732
    # (removed an unused local that shadowed g_opts.gausshome_kerberso)
    gausslog = DefaultValue.getEnvironmentParameterValue("GAUSSLOG", "")
    if not gausslog:
        raise Exception(ErrorCode.GAUSS_518["GAUSS_51802"] % "GAUSSLOG")
    cmd = "sed -i 's/#kdc_ip#/%s/g' %s && \
          sed -i 's/#kdc_ports#/%d/g' %s && \
          sed -i 's;#krb_conf#;%s;g' %s && \
          sed -i 's;#GAUSSHOME#;%s;g' %s" % \
          (kdc_ip, krb5_file,
           kdc_port, krb5_file,
           g_opts.gausshome_kerberso, krb5_file,
           gausslog, krb5_file)
    (status, output) = subprocess.getstatusoutput(cmd)
    if status != 0:
        g_logger.debug("The cmd is %s " % cmd)
        raise Exception(ErrorCode.GAUSS_502["GAUSS_50205"] % krb5_file
                        + output)
    # ensure $GAUSSLOG/kerberos exists with restricted permissions
    kerberoslog = os.path.join(gausslog, "kerberos")
    cmd = "if [ ! -d '%s' ]; then mkdir -p '%s' -m %s; fi" % (kerberoslog,
        kerberoslog, DefaultValue.KEY_DIRECTORY_MODE)
    (status, output) = subprocess.getstatusoutput(cmd)
    if status != 0:
        g_logger.debug("The cmd is %s " % cmd)
        raise Exception(ErrorCode.GAUSS_502["GAUSS_50205"] % krb5_file
                        + output)
    g_logger.log("Initialize \"krb5.conf\" successfully.")
def __initKdcConf(self, dest_dir):
    """Initialize kdc.conf: shared placeholders plus #KRB_HOME#."""
    # kdc.conf shares the kdc_ip/ports/paths placeholders with krb5.conf
    self.__initKrb5Conf(dest_dir, "kdc.conf")
    conf_path = os.path.realpath(os.path.join(dest_dir, "kdc.conf"))
    sed_cmd = "sed -i 's;#KRB_HOME#;%s;g' %s" % (g_opts.gausshome,
                                                 conf_path)
    status, output = subprocess.getstatusoutput(sed_cmd)
    if status != 0:
        g_logger.debug("The cmd is %s " % sed_cmd)
        raise Exception(ErrorCode.GAUSS_502["GAUSS_50205"] % conf_path
                        + output)
    g_logger.log("Initialize \"kdc.conf\" successfully.")
def __initMppdbSite(self, dest_dir):
    """
    function: fill all placeholders in mppdb-site.xml (principal,
              KRB_HOME, KRB_CONFIG, keytab path)
    input : dest_dir - directory containing mppdb-site.xml
    output: NA
    """
    mppdb_site_file = os.path.realpath(os.path.join(dest_dir,
                                                    "mppdb-site.xml"))
    principal = "%s/<EMAIL> " % g_opts.user
    # the same sed-substitute-and-check sequence was repeated four
    # times inline; it now lives in __sedReplaceMppdbSite
    self.__sedReplaceMppdbSite("#mppdb.kerberos.principal#", principal,
                               mppdb_site_file)
    self.__sedReplaceMppdbSite("#KRB_HOME#", g_opts.gausshome,
                               mppdb_site_file)
    kdc_conf = os.path.realpath(os.path.join(g_opts.gausshome_kerberso,
                                             "kdc.conf"))
    self.__sedReplaceMppdbSite("#KRB_CONFIG#", kdc_conf, mppdb_site_file)
    keytab = os.path.realpath(os.path.join(g_opts.gausshome_kerberso,
                                           "%s.keytab" % g_opts.user))
    self.__sedReplaceMppdbSite("#mppdb.kerberos.keytab#", keytab,
                               mppdb_site_file)
    g_logger.log("Initialize \"mppdb-site.xml\" successfully.")

def __sedReplaceMppdbSite(self, placeholder, value, targetFile):
    """Replace every occurrence of placeholder with value in targetFile.

    Uses ';' as the sed delimiter because the values are file paths
    containing '/'. Raises GAUSS_50205 on failure, as before.
    """
    cmd = "sed -i 's;%s;%s;g' %s" % (placeholder, value, targetFile)
    (status, output) = subprocess.getstatusoutput(cmd)
    if status != 0:
        g_logger.debug("The cmd is %s " % cmd)
        raise Exception(ErrorCode.GAUSS_502["GAUSS_50205"] %
                        targetFile + output)
def __configKrb5(self, isUninstall, isServer=False):
    """
    function: config specify krb5.conf
    input : isUninstall - True removes the deployed krb5.conf
            isServer   - server side copies and initializes the full
                         KDC config set; client side only adapts
                         krb5.conf next to the mpprc file
    output: NA
    """
    destfile = "%s/krb5.conf" % os.path.dirname(g_opts.mpprcFile)
    if not isUninstall:
        if isServer:
            # 1.copy conf files to GAUSSHOME/kerberos
            CONFIG_LIST = ["kadm5.acl", "kdc.conf", "krb5.conf"]
            src_path = os.path.realpath(os.path.join(g_opts.gausshome,
                                                     "etc", "kerberos"))
            self.__copyConf(src_path, g_opts.gausshome_kerberso,
                            CONFIG_LIST)
            # 2.initialize conf files
            self.__initKadm5Conf(g_opts.gausshome_kerberso)
            self.__initKrb5Conf(g_opts.gausshome_kerberso)
            self.__initKdcConf(g_opts.gausshome_kerberso)
        else:
            #1. copy "krb5.conf"
            if (os.path.isfile(g_opts.krbConfigPath)):
                shutil.copy(g_opts.krbConfigPath, destfile)
            else:
                raise Exception(ErrorCode.GAUSS_502["GAUSS_50228"] %
                                g_opts.krbConfigPath)
            #2. change cache file path of kerberos
            if(not os.path.isdir("%s/auth_config" %
                                 os.path.dirname(g_opts.mpprcFile))):
                raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] %
                                ("%s/auth_config" %
                                 os.path.dirname(g_opts.mpprcFile)))
            # point the ticket cache at auth_config/krb5cc_<uid>
            cmd = "sed -i '/default_realm.*/i default_ccache_name = " \
                  "FILE:%s/auth_config/krb5cc_%s' '%s'" % \
                  (os.path.dirname(g_opts.mpprcFile),
                   pwd.getpwnam(g_opts.user).pw_uid, destfile)
            (status, output) = subprocess.getstatusoutput(cmd)
            if status != 0:
                g_logger.debug("The cmd is %s " % cmd)
                raise Exception("Config 'krb5.conf' failed.cmd: %s" % cmd)
            g_logger.log("Client Config \"krb5.conf\" successfully.")
    else:
        # uninstall: simply remove the deployed client krb5.conf
        if os.path.isfile(destfile):
            os.remove(destfile)
        g_logger.log("Clear \"krb5.conf\" successfully.")
def __initUser(self):
    """
    function: create the KDC database (if missing), the cluster user's
              principal and its keytab file
    input : NA
    output: NA
    """
    # create kerberos database
    kerberos_database_file = \
        os.path.realpath(os.path.join(g_opts.gausshome,
                                      "var", "krb5kdc", "principal"))
    if os.path.isfile(kerberos_database_file):
        g_logger.debug("kerberos database has existed.")
    else:
        dir_permission = 0o700
        os.makedirs(os.path.dirname(kerberos_database_file),
                    mode=dir_permission)
        # master password: 16 bytes from /dev/random rendered as a
        # decimal integer
        with open("/dev/random", 'rb') as fp:
            srp = fp.read(16)
        passwd = int(srp.hex(), 16)
        cmd = "source %s && kdb5_util create -r HUAWEI.COM -s -P %s" % \
              (g_opts.mpprcFile, str(passwd))
        (status, output) = subprocess.getstatusoutput(cmd)
        if status != 0:
            g_logger.debug("The cmd is %s " % cmd)
            raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] %
                            "kdb5_util")
        g_logger.debug("Create kerberos database successfully.")
    # create kerberos database user (random key, no password)
    cmd = "source %s && kadmin.local -q \"addprinc " \
          "-randkey %s/huawei.huawei.com\"" % \
          (g_opts.mpprcFile, g_opts.user)
    (status, output) = subprocess.getstatusoutput(cmd)
    if status != 0:
        raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd + output)
    g_logger.debug("Create kerberos database user successfully.")
    # create kerberos keytab under GAUSSHOME/kerberos
    cmd = "source %s && kadmin.local -q \"ktadd -k %s/%s.keytab " \
          "%s/huawei.huawei.com@HUAWEI.COM\"" % \
          (g_opts.mpprcFile, g_opts.gausshome_kerberso,
           g_opts.user, g_opts.user)
    (status, output) = subprocess.getstatusoutput(cmd)
    if status != 0:
        raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd + output)
    g_logger.debug("Create kerberos keytab successfully.")
    g_logger.log("Initialize kerberos user successfully.")
def __startServer(self):
    """Launch the krb5kdc daemon under the cluster environment."""
    startCmd = "source %s && krb5kdc" % g_opts.mpprcFile
    status, output = subprocess.getstatusoutput(startCmd)
    if status != 0:
        raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % startCmd
                        + output)
    g_logger.debug("Start kerberos kdc successfully.")
    g_logger.log("Start kerberos server successfully.")
def __distributeKeyAndSite(self):
    """
    function: push the kerberos keytab/config directory and the filled
              mppdb-site.xml to every other cluster node
    input : NA
    output: NA
    """
    # every node except the local one
    hostlist = []
    for hostName in g_sshTool.hostNames:
        if hostName != g_opts.dbNodeInfo.name:
            hostlist.append(hostName)
    g_logger.debug("Distribute nodes: %s" % ",".join(hostlist))
    # distribute keytab
    dest_kerberos_dir = os.path.dirname(g_opts.gausshome_kerberso) + '/'
    g_sshTool.scpFiles(g_opts.gausshome_kerberso,
                       dest_kerberos_dir, hostlist)
    # create auth_config (on every node, including the local one)
    mppdb_site_dir = os.path.join(os.path.dirname(g_opts.mpprcFile),
                                  "auth_config")
    cmd = "if [ ! -d '%s' ]; then mkdir %s; fi" % (mppdb_site_dir,
                                                   mppdb_site_dir)
    g_sshTool.executeCommand(cmd, "create auth_config directory",
                             DefaultValue.SUCCESS, g_sshTool.hostNames,
                             g_opts.mpprcFile)
    # copy mppdb-site.xml
    src_path = os.path.realpath(os.path.join(g_opts.gausshome, "etc",
                                             "kerberos", "mppdb-site.xml"))
    if not os.path.isfile(src_path):
        raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % src_path)
    dest_path = os.path.realpath(os.path.join(mppdb_site_dir,
                                              "mppdb-site.xml"))
    try:
        shutil.copy(src_path, dest_path)
    except Exception as e:
        raise Exception(ErrorCode.GAUSS_502["GAUSS_50214"] % src_path)
    # init mppdb-site.xml
    self.__initMppdbSite(mppdb_site_dir)
    # distribute mppdb-site.xml
    g_sshTool.scpFiles(dest_path, dest_path, hostlist)
def __restartOMmonitor(self):
    """
    function: restart OM_monitor for new environment variable
    input: NA
    output: NA
    """
    #1. find om_monitor process
    DefaultValue.KillAllProcess(g_opts.user, "om_monitor")
    g_logger.log("Kill om_monitor successfully.")
    # relaunch in the background with the refreshed environment
    cmd = "source /etc/profile;source '%s';%s/bin/om_monitor " \
          "-L %s/%s/cm/om_monitor >> /dev/null 2>&1 &" % \
          (g_opts.mpprcFile, g_clusterInfo.appPath,
           g_clusterInfo.logPath, g_opts.user)
    (status, output) = subprocess.getstatusoutput(cmd)
    if status != 0:
        # best effort: log the failure but do not abort
        g_logger.debug("The cmd is %s " % cmd)
        g_logger.debug("Start om_monitor process failed.")
        g_logger.debug("Error:%s\n" % output)
    g_logger.log("Restart om_monitor succeed.")
def __cleanAuthConfig(self):
    """Remove the client-side auth_config directory and kerberos logs."""
    auth_config = os.path.join(os.path.dirname(g_opts.mpprcFile),
                               "auth_config")
    if os.path.isdir(auth_config):
        try:
            shutil.rmtree(auth_config)
        except Exception:
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50207"] %
                            auth_config)
    # also drop the per-user kerberos log directory if present
    userLogPath = DefaultValue.getUserLogDirWithUser(g_opts.user)
    kerberosLog = "%s/kerberos" % userLogPath
    if os.path.exists(kerberosLog):
        g_file.removeDirectory(kerberosLog)
    g_logger.log("Clean auth config directory succeed.")
def __cleanServer(self):
    """
    function: remove the KDC config/data directories, drop the krb5kdc
              watchdog cron entry and kill any running krb5kdc process
    input : NA
    output: NA
    """
    if os.path.isdir(g_opts.gausshome_kerberso):
        try:
            shutil.rmtree(g_opts.gausshome_kerberso)
        except Exception as e:
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50207"] %
                            g_opts.gausshome_kerberso)
    # the KDC database lives under GAUSSHOME/var/krb5kdc
    krb_data = "%s/var/krb5kdc" % g_opts.gausshome
    if os.path.isdir(krb_data):
        try:
            shutil.rmtree(krb_data)
        except Exception as e:
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50207"] % krb_data)
    # Remove cron
    crontabFile = "%s/gauss_crontab_file_%d" % \
        (DefaultValue.getTmpDirFromEnv(g_opts.user), os.getpid())
    cmd = "crontab -l > %s; " % crontabFile
    cmd += "sed -i '/^.*krb5kdc.*$/d' '%s'; " % crontabFile
    cmd += "crontab '%s';" % crontabFile
    cmd += "rm -f '%s'" % crontabFile
    (status, output) = subprocess.getstatusoutput(cmd)
    if (status != 0):
        #no need raise error here, user can do it manually.
        g_logger.debug("The cmd is %s " % cmd)
        g_logger.debug("Failed to delete regular tasks. Error: \n%s "
                       "You can do it manually." % str(output))
    # kill leftover krb5kdc processes (xargs batches 100 pids per kill)
    cmd = "source /etc/profile; source '%s' && \
          proc_pid_list=`ps ux | grep -E 'krb5kdc'| \
          grep -v 'grep'|awk '{print \$2}'` && \
          (if [ X\"$proc_pid_list\" != X\"\" ]; \
          then echo \"$proc_pid_list\" | xargs -r -n 100 kill -9 ; fi)" \
          % (g_opts.mpprcFile)
    (status, output) = subprocess.getstatusoutput(cmd)
    if (status != 0):
        raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd + output)
    g_logger.log("Clean Server files and process succeed.")
def __install(self):
    """
    function: enable Kerberos authentication (server or client side),
              rolling back on any failure
    input : NA
    output: NA
    """
    try:
        self.__configKrb5(False, g_opts.server)
        self.__setUserEnvVariable(False, g_opts.server)
        self.__triggerJob(False, g_opts.server)
        if not g_opts.server:
            # client side also patches the instance configuration and
            # restarts om_monitor so the new environment takes effect
            self.__configPostgresql(False)
            self.__configPgHba(False)
            self.__configDNPgHba(False)
            self.__restartOMmonitor()
        g_logger.log("Successfully start Kerberos Authentication.")
    except Exception as e:
        # undo partial changes before propagating the original error
        self.__rollback(g_opts.server)
        raise e
    # (removed a dead "finally: pass" block that did nothing)
def __uninstall(self, isRollBack=False):
    """
    function: disable Kerberos authentication; when invoked as the
              rollback of a failed install, skip the destructive
              cleanup of auth_config and the KDC server data
    input : isRollBack - True when called from install rollback
    output: NA
    """
    self.__configKrb5(True)
    self.__setUserEnvVariable(True, False, isRollBack)
    self.__triggerJob(True)
    self.__configPostgresql(True)
    self.__configPgHba(True)
    self.__configDNPgHba(True)
    if not isRollBack:
        # real uninstall: also remove auth_config and the KDC data
        self.__cleanAuthConfig()
        self.__cleanServer()
    g_logger.log("Successfully close Kerberos Authentication.")
def run(self):
    """Dispatch to install or uninstall according to the -m action."""
    action = g_opts.action
    if action == "install":
        self.__install()
        return
    if action == "uninstall":
        self.__uninstall(False)
        return
    # any other action value is a usage error
    raise Exception(ErrorCode.GAUSS_500["GAUSS_50000"] % action)
def parseCommandLine():
    """
    function: Check parameter from command line
    input : NA
    output: NA (populates the module-level g_opts)
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "m:U:",
                                   ["help", "krb-server", "krb-client"])
    except Exception as e:
        usage()
        GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50000"] % str(e))
    # no positional arguments are accepted
    if(len(args) > 0):
        GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50000"] %
                               str(args[0]))
    global g_opts
    g_opts = CmdOptions()
    for (key, value) in opts:
        if (key == "--help"):
            usage()
            sys.exit(0)
        elif(key == "-m"):
            g_opts.action = value
        elif (key == "-U"):
            g_opts.user = value
        elif (key == "--krb-server"):
            g_opts.server = True
        elif (key == "--krb-client"):
            g_opts.client = True
    # install needs exactly one of --krb-server / --krb-client.
    # NOTE(review): the literals below look lopsided ("-krb-server");
    # presumably the error templates prepend a leading '-' -- confirm
    if g_opts.action == 'install':
        if not g_opts.server and not g_opts.client:
            GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] %
                                   "-krb-server' or '--krb-client")
        if g_opts.server and g_opts.client:
            GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50005"] %
                                   ("-krb-server", "-krb-client"))
def usage():
    '''
    python3 KerberosUtility.py is a utility to config a {0} cluster.
    Usage:
      KerberosUtility.py -m install -U USER --krb-server
      KerberosUtility.py -m install -U USER --krb-client
      KerberosUtility.py -m uninstall -U USER
    General options:
      -m                 "install" will set kerberos config for {0} cluster,
                         "uninstall" will cancel to set kerberos config for {0} cluster.
      -U                 Cluster User for {0} cluster
    Install options:
      --krb-server       Execute install for server.
                         This parameter only work for install
      --krb-client       Execute install for client.
                         This parameter only work for install
    Notes:
      --krb-server and --krb-client can only chose one
    '''
    # NOTE: the docstring above IS the runtime help text (printed via
    # usage.__doc__), so its wording and layout must not be changed
    # without changing the program's output.
    print(usage.__doc__)
if __name__ == '__main__':
    """
    main function
    """
    # phase 1: parse args and set up globals; failures here cannot use
    # the logger yet, so they go straight to stderr
    try:
        parseCommandLine()
        initGlobals()
    except Exception as e:
        GaussLog.exitWithError(str(e))
    # phase 2: run the requested action with full logging available
    try:
        kbs = Kerberos()
        kbs.run()
        sys.exit(0)
    except Exception as e:
        g_logger.logExit(str(e))
| StarcoderdataPython |
1821471 | <reponame>wahyutirta/CNN-numpy
from lenet5 import *
import numpy as np
import matplotlib.pyplot as plt
def plotimage(imgs):
    """Show up to six feature maps in a fixed 3x2 subplot grid.

    Note: the grid is fixed at rows*columns = 6 cells; add_subplot
    raises if more than six images are passed.
    """
    # create figure
    fig = plt.figure(figsize=(4, 7))
    rows = 3
    columns = 2
    # enumerate replaces the hand-maintained counter variable
    for counter, img in enumerate(imgs, start=1):
        fig.add_subplot(rows, columns, counter)
        plt.imshow(img)
        plt.axis('off')
        plt.title("feature " + str(counter))
    # bug fix: plt.legend() was called with no labelled artists, which
    # only emitted a "No artists with labels found" warning and drew
    # nothing, so the call has been removed
    #plt.savefig('FMAP.png', dpi=300)
    plt.show()
# ---- paths and data loading ----------------------------------------------
mainPath = os.path.dirname(os.path.abspath(__file__)) #file path main.py
# NOTE(review): os.path.split returns a (head, tail) tuple here --
# presumably Data() expects that; confirm against the Data class
workPath = os.path.split(mainPath) #path working folder (whole file project)
imagePath = "data_jepun"
data = Data(workPath, imagePath)
X_train, trainLabel, fNameTrain ,X_test, testLabel, fNameTest = data.load()
# one-hot encode the training labels
kelas = data.jum_kelas
len_label = trainLabel.shape[0]
Y_train = np.zeros((len_label,kelas))
Y_train[np.arange(len_label), trainLabel[range(0, len_label)]] = 1
# one-hot encode the test labels
kelas = data.jum_kelas
len_label = testLabel.shape[0]
Y_test = np.zeros((len_label, kelas))
Y_test[np.arange(len_label), testLabel[range(0, len_label)]] = 1
# ---- hyper-parameters ----------------------------------------------------
method = "adam"
epochs = 201
batch = 32
learningRate = 0.0001
# "train" retrains from scratch and saves weights; "test" loads them
mode = "test"
if mode == "train":
    mylenet = LENET5(X_train, Y_train, X_test, Y_test, method=method,epochs=epochs, batch=batch, learningRate=learningRate )
    layer_time = []
    start = timeit.default_timer()
    mylenet.lenet_train(method=method, epochs=epochs, batch=batch, learningRate=learningRate, zeta=0)
    stop = timeit.default_timer()
    print("Training time:", stop - start)
    print("Training ", end="")
    mylenet.save_parameters(mainPath)
    # sanity check: classify one known image with the fresh weights
    imgpath= "C:/Users/ASUS/Documents/py/cnn-numpy/data_jepun/bali/bali_(2).jpg"
    temp = os.path.split(imgpath)
    prob = mylenet.one_image(mylenet.layers, imgpath )
    print("\nFile Name ::", temp[1], " Tipe bunga ::", data.labelName[np.argmax(prob)], "||" ,
          "confidence ::", prob[0,np.argmax(prob)])
    acc, loss, time = mylenet.lenet_predictions(mylenet, mylenet.layers,X_test, Y_test, fNameTest, data.labelName)
    mylenet.printpred(acc, loss, time)
elif mode == "test":
    # empty data: the network only needs its architecture + saved weights
    mylenet = LENET5([], [], [], [], method=method,epochs=epochs, batch=batch, learningRate=learningRate )
    imgpath= "C:/Users/ASUS/Documents/py/cnn-numpy/data_jepun/Plumeria_rubra_L_cendana/cendana_(1).jpg"
    temp = os.path.split(imgpath)
    """ load training history """
    mylenet.load_train_details(mainPath=mainPath,epochs=epochs,method=method, batch=batch, learningRate=learningRate )
    """ testing one image """
    print("Params: batch=", batch, " learning rate=", learningRate, "method=", method, "epochs=", epochs)
    mylenet.load_parameters(mainPath=mainPath,epochs=epochs,method=method, batch=batch, learningRate=learningRate)
    #acc, loss, time = mylenet.lenet_predictions(mylenet, mylenet.layers,X_test, Y_test,fNameTest, data.labelName)
    #mylenet.printpred(acc, loss, time)
    #prob = mylenet.one_image(mylenet.layers, imgpath )
    #print("\nFile Name ::", temp[1], " Tipe bunga ::", data.labelName[np.argmax(prob)], "||" ,
    #"confidence ::", prob[0,np.argmax(prob)])
    # visualize the feature maps of layer 1 for the chosen image
    feature = mylenet.displayFeature(mylenet.layers, imgpath, 1)
    img = feature.astype(np.uint8)
    plotimage(img)
| StarcoderdataPython |
4934920 | # Model Imports
from project.reports.models import Report
from project.users.models import User
from project.posts.models import Post, Comment
# Library Imports
from django.contrib.contenttypes.models import ContentType
# Util Imports
from project.users.utils import UserUtil
from project.posts.utils import PostUtil, CommentUtil
def get_latest_reports():
    """Return the 50 most recent reports, newest first."""
    newest_first = Report.objects.all().order_by("-created_at")
    return newest_first[:50]
def approve_report(report):
    """Apply the moderation action matching the reported object's type.

    Users are banned; posts and comments are deleted. Any other
    reportable type is rejected.
    """
    reported_type = report.reportable_type
    if reported_type == ContentType.objects.get_for_model(User):
        UserUtil.ban_user(report.reportable_id)
    elif reported_type == ContentType.objects.get_for_model(Post):
        PostUtil.get_post_or_exception(report.reportable_id).delete()
    elif reported_type == ContentType.objects.get_for_model(Comment):
        CommentUtil.get_comment_or_exception(report.reportable_id).delete()
    else:
        raise Exception("Invalid take action request")
| StarcoderdataPython |
8048169 | <gh_stars>0
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
import pytest
from aiohttp import web
from pytest_simcore.helpers.utils_assert import assert_error, assert_status
from pytest_simcore.helpers.utils_login import NewInvitation, NewUser, parse_link
from servicelib.rest_responses import unwrap_envelope
from simcore_service_webserver.db_models import ConfirmationAction, UserStatus
from simcore_service_webserver.login.cfg import cfg, get_storage
from simcore_service_webserver.login.registration import get_confirmation_info
EMAIL, PASSWORD = "<EMAIL>", "password"
async def test_regitration_availibility(client):
    """Happy path: a well-formed registration request returns 200."""
    # NOTE(review): the function name has typos ("regitration",
    # "availibility"); kept as-is since renaming changes the test id
    url = client.app.router["auth_register"].url_for()
    r = await client.post(
        url, json={"email": EMAIL, "password": PASSWORD, "confirm": PASSWORD,}
    )
    await assert_status(r, web.HTTPOk)
async def test_regitration_is_not_get(client):
    """GET on the registration endpoint must be rejected (405)."""
    register_url = client.app.router["auth_register"].url_for()
    resp = await client.get(register_url)
    await assert_error(resp, web.HTTPMethodNotAllowed)
async def test_registration_with_existing_email(client):
    """Registering an e-mail that already has an account must 409."""
    # (removed an unused `db = get_storage(client.app)` local)
    url = client.app.router["auth_register"].url_for()
    async with NewUser() as user:
        r = await client.post(
            url,
            json={
                "email": user["email"],
                "password": user["raw_password"],
                "confirm": user["raw_password"],
            },
        )
        await assert_error(r, web.HTTPConflict, cfg.MSG_EMAIL_EXISTS)
@pytest.mark.skip("TODO: Feature still not implemented")
async def test_registration_with_expired_confirmation(client, monkeypatch):
    """Registration with an expired pending confirmation (skipped: TODO)."""
    monkeypatch.setitem(cfg, "REGISTRATION_CONFIRMATION_REQUIRED", True)
    # a negative lifetime makes every confirmation immediately expired
    monkeypatch.setitem(cfg, "REGISTRATION_CONFIRMATION_LIFETIME", -1)
    db = get_storage(client.app)
    url = client.app.router["auth_register"].url_for()
    async with NewUser({"status": UserStatus.CONFIRMATION_PENDING.name}) as user:
        confirmation = await db.create_confirmation(
            user, ConfirmationAction.REGISTRATION.name
        )
        r = await client.post(
            url,
            json={
                "email": user["email"],
                "password": user["<PASSWORD>"],
                "confirm": user["<PASSWORD>"],
            },
        )
        await db.delete_confirmation(confirmation)
        await assert_error(r, web.HTTPConflict, cfg.MSG_EMAIL_EXISTS)
async def test_registration_without_confirmation(client, monkeypatch):
    """With confirmation disabled, registration logs the user straight in."""
    monkeypatch.setitem(cfg, "REGISTRATION_CONFIRMATION_REQUIRED", False)
    db = get_storage(client.app)
    url = client.app.router["auth_register"].url_for()
    r = await client.post(
        url, json={"email": EMAIL, "password": PASSWORD, "confirm": PASSWORD}
    )
    data, error = unwrap_envelope(await r.json())
    assert r.status == 200, (data, error)
    assert cfg.MSG_LOGGED_IN in data["message"]
    # cleanup: remove the user this test created
    user = await db.get_user({"email": EMAIL})
    assert user
    await db.delete_user(user)
async def test_registration_with_confirmation(client, capsys, monkeypatch):
    """Full confirmation flow: register, follow the link, become ACTIVE."""
    monkeypatch.setitem(cfg, "REGISTRATION_CONFIRMATION_REQUIRED", True)
    db = get_storage(client.app)
    url = client.app.router["auth_register"].url_for()
    r = await client.post(
        url, json={"email": EMAIL, "password": PASSWORD, "confirm": PASSWORD}
    )
    data, error = unwrap_envelope(await r.json())
    assert r.status == 200, (data, error)
    # until the link is followed the account stays pending
    user = await db.get_user({"email": EMAIL})
    assert user["status"] == UserStatus.CONFIRMATION_PENDING.name
    assert "verification link" in data["message"]
    # retrieves sent link by email (see monkeypatch of email in conftest.py)
    out, err = capsys.readouterr()
    link = parse_link(out)
    assert "/auth/confirmation/" in str(link)
    resp = await client.get(link)
    text = await resp.text()
    assert "welcome to fake web front-end" in text
    assert resp.status == 200
    # following the link activates the account
    user = await db.get_user({"email": EMAIL})
    assert user["status"] == UserStatus.ACTIVE.name
    await db.delete_user(user)
@pytest.mark.parametrize(
    "is_invitation_required,has_valid_invitation,expected_response",
    [
        (True, True, web.HTTPOk),
        (True, False, web.HTTPForbidden),
        (False, True, web.HTTPOk),
        (False, False, web.HTTPOk),
    ],
)
async def test_registration_with_invitation(
    client, is_invitation_required, has_valid_invitation, expected_response,
):
    """Registration honours the invitation requirement in all combinations."""
    from servicelib.application_keys import APP_CONFIG_KEY
    from simcore_service_webserver.login.config import CONFIG_SECTION_NAME
    client.app[APP_CONFIG_KEY][CONFIG_SECTION_NAME] = {
        "registration_confirmation_required": False,
        "registration_invitation_required": is_invitation_required,
    }
    #
    # User gets an email with a link as
    # https:/some-web-address.io/#/registration/?invitation={code}
    #
    # Front end then creates the following request
    #
    async with NewInvitation(client) as confirmation:
        print(get_confirmation_info(confirmation))
        url = client.app.router["auth_register"].url_for()
        r = await client.post(
            url,
            json={
                "email": EMAIL,
                "password": PASSWORD,
                "confirm": PASSWORD,
                "invitation": confirmation["code"]
                if has_valid_invitation
                else "WRONG_CODE",
            },
        )
        await assert_status(r, expected_response)
        # check optional fields in body
        if not has_valid_invitation or not is_invitation_required:
            r = await client.post(
                url, json={"email": "new-user" + EMAIL, "password": PASSWORD}
            )
            await assert_status(r, expected_response)
        # a consumed valid invitation must be gone from storage
        if is_invitation_required and has_valid_invitation:
            db = get_storage(client.app)
            assert not await db.get_confirmation(confirmation)
if __name__ == "__main__":
    # Allow running this test module directly; stop at the first failure.
    pytest.main([__file__, "--maxfail=1"])
| StarcoderdataPython |
62906 | import random
class Node:
    """A binary-search-tree node: an integer key plus parent/child links."""

    def __init__(self, k):
        self.key = k        # this node's key
        self.parent = None  # parent node, or None for the root
        self.left = None    # left child (smaller keys)
        self.right = None   # right child (keys >= key, per insert's else branch)
def insert(root, n):
    """Insert node n into the BST rooted at root; return the (possibly new) root.

    Equal keys go to the right subtree.  n's parent pointer is set here.
    """
    parent = None
    cursor = root
    # Walk down until we fall off the tree, remembering the last node visited.
    while cursor is not None:
        parent = cursor
        cursor = cursor.left if n.key < cursor.key else cursor.right
    n.parent = parent
    if parent is None:
        # Tree was empty: the new node is the root.
        return n
    if n.key < parent.key:
        parent.left = n
    else:
        parent.right = n
    return root
def gen_numbers(size):
    """Return a random permutation of the integers 0 .. size - 1."""
    values = list(range(size))
    return random.sample(values, len(values))
def gen_tree(numbers):
    """Build a BST by inserting the given keys in order; return its root (None if empty)."""
    root = None
    for key in numbers:
        root = insert(root, Node(key))
    return root
def inorder_tree_walk(root):
    """Print the subtree's keys in sorted (in-order) order, space-separated, no newline."""
    if root is None:
        return
    inorder_tree_walk(root.left)
    print(root.key, end=' ')
    inorder_tree_walk(root.right)
def search(root, k):
    """Return the node with key k in the subtree rooted at root, or None if absent."""
    node = root
    # Iterative descent (equivalent to the classic recursive formulation).
    while node is not None and node.key != k:
        node = node.left if k < node.key else node.right
    return node
def minimum(root):
    """Return the node holding the smallest key in the subtree rooted at root."""
    node = root
    # The minimum is the leftmost node.
    while node.left is not None:
        node = node.left
    return node
def maximum(root):
    """Return the node holding the largest key in the subtree rooted at root."""
    node = root
    # The maximum is the rightmost node.
    while node.right is not None:
        node = node.right
    return node
def successor(n):
    """Return n's in-order successor, or None if n holds the largest key."""
    if n.right is not None:
        # Successor is the smallest key of the right subtree.
        return minimum(n.right)
    # Otherwise climb until we step up from a left child.
    child, ancestor = n, n.parent
    while ancestor is not None and child is ancestor.right:
        child, ancestor = ancestor, ancestor.parent
    return ancestor
def predecessor(n):
    """Return n's in-order predecessor, or None if n holds the smallest key."""
    if n.left is not None:
        # Predecessor is the largest key of the left subtree.
        return maximum(n.left)
    # Otherwise climb until we step up from a right child.
    child, ancestor = n, n.parent
    while ancestor is not None and child is ancestor.left:
        child, ancestor = ancestor, ancestor.parent
    return ancestor
def transplant(root, u, v):
    """Replace the subtree rooted at u with the subtree rooted at v (v may be None).

    Returns the (possibly new) root of the whole tree.  Fix over the original:
    when u was the root, the early return skipped updating v.parent, leaving it
    pointing at its old (possibly deleted) parent; CLRS clears it to None.
    """
    if u.parent is None:
        # u was the root: v becomes the new root and must forget its old parent.
        if v is not None:
            v.parent = None
        return v
    if u is u.parent.left:
        u.parent.left = v
    else:
        u.parent.right = v
    if v is not None:
        v.parent = u.parent
    return root
def delete(root, n):
    # CLRS-style BST deletion: remove node n and return the (possibly new) root.
    if n.left is None:
        # No left child: splice in the right subtree (possibly None).
        root = transplant(root, n, n.right)
    elif n.right is None:
        # Only a left child: splice it in.
        root = transplant(root, n, n.left)
    else:
        # Two children: n's successor y (minimum of the right subtree) takes n's place.
        y = minimum(n.right)
        if y.parent is not n:
            # y is deeper in the right subtree: detach it first, then adopt n's right child.
            root = transplant(root, y, y.right)
            y.right = n.right
            y.right.parent = y
        root = transplant(root, n, y)
        y.left = n.left
        y.left.parent = y
    return root
def sample_gen_tree():
    """Demo: build a BST from 10 shuffled keys and show them plus the root key."""
    numbers = gen_numbers(10)
    print(numbers)
    print(gen_tree(numbers).key)
def sample_print_sorted_keys():
    """Demo: print shuffled keys, then walk the tree to print them in sorted order."""
    numbers = gen_numbers(10)
    print(numbers)
    inorder_tree_walk(gen_tree(numbers))
def sample_search():
    """Demo: search for a present key (3) and an absent key (11)."""
    numbers = gen_numbers(10)
    print(numbers)
    tree = gen_tree(numbers)
    print(search(tree, 3))
    print(search(tree, 11))
def sample_delete():
    """Demo: delete every key in random order, printing the remaining keys after each step."""
    numbers = gen_numbers(10)
    print(numbers)
    root = gen_tree(numbers)
    inorder_tree_walk(root)
    print()
    for key in gen_numbers(10):
        print(f'delete = {key}')
        root = delete(root, search(root, key))
        inorder_tree_walk(root)
        print()
if __name__ == '__main__':
    # Demo entry point; uncomment any of the other samples to run them too.
    # sample_gen_tree()
    # sample_print_sorted_keys()
    # sample_search()
    sample_delete()
    pass
| StarcoderdataPython |
6544440 | from room import Room
from item import Item
from character import Character , Enemy , Friend
from rpginfo import RPGinfo
# no underscore = public
# self.my_attribute = None
# single underscore = protected
# self._my_attribute = None
# double underscore = private
# self.__my_attribute = None
# Static and class methods belong to the class, whereas instance methods belong to each object.
# --- World setup: game metadata, rooms, items and characters --------------
spooksville = RPGinfo("Monster Mash")
spooksville.welcome()
RPGinfo.info()
RPGinfo.author = "Raspberry Pi Foundation"
print("-----------------------------------------------------------")
print("Commands that can be used:\nsouth , east , west , north , \ntake , hug , fight , inventory , use")
kitchen = Room("Kitchen")  # Room(name) sets the room's name attribute
ballroom = Room("Ballroom")
dining_hall = Room("Dinning Hall")
kitchen.desc=("A bright marble floored modern day kitchen , with metal pans lining the walls")
ballroom.desc=("A dimly lit , large ballroom with massive a chandelier looming above")
dining_hall.desc=("A large table absorbs the dining room , with large banners and pictures surronding the room")
# Linking rooms (each direction is declared separately on both rooms)
kitchen.link_room(dining_hall,"south")
dining_hall.link_room(kitchen,"north")
dining_hall.link_room(ballroom,"west")
ballroom.link_room(dining_hall,"east")
current_room = kitchen
# Defining items; bag holds item NAMES (strings), capacity-checked later
bag = []
acid = Item("Acid","Weapon")
key = Item("Rusty Key","Quest")
potion = Item("Red Potion","Health")
acid._desc = "A toxic green acid that is breaking through its container slightly"
key._desc = 'A old rust covered gate key '
potion._desc = "A blood red solution in a large flask with a cork screw sealing it in "
# Setting characters to rooms: a friendly mushroom and a skeleton enemy
roomy =Friend("Roomy","A short living mushroom with bright red cheeks")
roomy.set_conversation("Hi there seen any other fungi around...")
skeletor=Enemy('Skeletor',"A tall slender skeleton with a battle axe and shield ")
skeletor.set_conversation("Greetings... A humannnn , I can't believe it mmm...")
skeletor.set_weakness("Acid")  # only "Acid" wins the fight against Skeletor
dining_hall._character = skeletor
ballroom._character= roomy
roomy.set_item(acid)  # hugging Roomy yields the acid
# Setting items to rooms
kitchen._item = potion
dining_hall._item = key
# `game` doubles as the life counter: potion adds a life, defeat subtracts one,
# and the loop below ends when it drops below 1.
game = 1
print("There are " + str(Room.rooms) + " rooms to explore.")
# --- Main game loop: describe the room, read a command, act on it ---------
while game >= 1:
    print("\n")
    current_room.print_name()
    current_room.print_desc()
    current_room.get_details()
    item =current_room._item
    inhabitant = current_room._character
    if inhabitant is not None :
        inhabitant.describe()
    if item is not None :
        print("There is an item")
        item.describe()
    command = input("> ")
    # Check whether a direction was typed
    if command in ["north", "south", "east", "west"]:
        current_room = current_room.move(command)
    elif command == "talk":
        if inhabitant is not None:
            inhabitant.talk()
        else:
            print("No one is here silly")
    elif command == 'fight':
        # Friends refuse to fight; enemies require a weapon from the bag (or fists).
        if inhabitant is not None:
            if isinstance(inhabitant, Friend):
                inhabitant.fight()
            else:
                w = input("Enter what you want to fight with\n:")
                if w in bag or w == 'fist':
                    f = inhabitant.fight(w)
                    if f == True:
                        # Victory: weapon is consumed, enemy removed, exit revealed.
                        bag.remove(w)
                        dining_hall._character = None
                        print("As the bones fall you see a large gate appear on the wall with a rusty key hole")
                    elif f == False :
                        # Defeat costs one life; at zero lives the game is over.
                        game = game - 1
                        if game <= 0:
                            print("You have been defeated and the adventutre will end here")
                            RPGinfo.credits(spooksville.title)
                        else:
                            print("The monster defeated you but you had a extra life")
                else:
                    print("You do not have this item")
        else:
            print("There is no one here to fight")
    elif command == 'hug':
        # Hugging a friend once yields their item; further hugs are declined.
        if inhabitant is not None and isinstance(inhabitant, Friend):
            if inhabitant.item is not None:
                it = inhabitant.hug()
                bag.append(it._name)
                print(str(it._name)+" Has been added to your bag")
                print(bag)
                inhabitant.item = None
            else:
                print("["+inhabitant.name+" says]: Thats too much now")
        elif inhabitant is not None:
            print(inhabitant.name+" doesn't want to hug you , shame")
        else:
            print("There is no one to hug *cries inside*")
    elif command == 'take':
        # Bag holds at most 10 item names.
        if item is not None and len(bag)<= 10:
            bag.append(item._name)
            space = 10 - len(bag)
            print("Item taken \nSpace left: "+str(space))
            current_room._item = None
        else:
            print("No items to take")
    elif command == 'inventory':
        if len(bag) == 0:
            print("--Empty--")
        else:
            print(bag)
            space = 10- len(bag)
            print("This much space left: "+str(space))
    elif command == 'use':
        # Key wins the game (once Skeletor is gone); potion grants a life;
        # acid used outside battle costs a life.
        if len(bag) != 0:
            print(bag)
            i = str(input("Please enter the item to use as its dispalyed in your bag\n:"))
            if i in bag:
                if i== 'Rusty Key' and dining_hall._character is None:
                    print("You use the rusty key to open the large gate ,\nYou are free")
                    RPGinfo.credits(spooksville.title)
                    game = 0
                if i == 'Red Potion':
                    game = game + 1
                    print("You feel a strange feeling almost like you have gained a extra life")
                    bag.remove(i)
                if i == 'Acid':
                    print("You pour the acid on yourself\nNext time try using it in battle instead")
                    bag.remove(i)
                    game = game -1
                    if game == 0:
                        print("You have been defeated and the adventutre will end here")
                        RPGinfo.credits(spooksville.title)
                    else:
                        print("You had a extra life and remain alive")
            else:
                print(i+" Is not in your bag")
        else:
            print("You have no items")
| StarcoderdataPython |
def make_sandwich(*args):
    """Print each ingredient passed as a positional argument, one bullet per line."""
    print("These ingredients are in your sandwich:")
    for ingredient in args:
        print(f"- {ingredient}")
# Demo calls.
sandwich = ['lettuce', 'tomatoe', 'rocks']
make_sandwich('lettuce', 'tomatoe', 'potatoe', 'spinach')
make_sandwich('olives', 'ham')
# *sandwich unpacks the list into positional arguments.
make_sandwich(*sandwich)
| StarcoderdataPython |
125454 | <filename>Cyborg Currency.py
amount = int(input('''Copyright © 2021 <NAME>
CURRENCY CONVERTER.lk
---------------------
Please enter amount(LKR): '''))
con_currency = input("Please enter convert currency: ")
# The constants are LKR-per-unit rates (circa 2021: ~200 LKR/USD, ~239 LKR/EUR,
# ~280 LKR/GBP).  The amount entered is in LKR, so we must DIVIDE by the rate;
# the original multiplied, which inverted the conversion.  The three branches
# are mutually exclusive, hence elif.
if con_currency.upper() == "USD":
    converted = amount / 200
    print(f"🛑🛑🛑 {amount} LKR is {converted} 💲")
elif con_currency.upper() == "EUR":
    converted = amount / 239
    print(f"🛑🛑🛑 {amount} LKR is {converted} EUR")
elif con_currency.upper() == "GBP":
    converted = amount / 280
    print(f"🔴🔴🔴 {amount} LKR is {converted} GBP")
8108365 | <gh_stars>10-100
import json
import requests
import datetime
class ultraChatBot():
    """Minimal WhatsApp chatbot backed by the UltraMsg REST API.

    Parses one incoming webhook payload and answers a small command set
    (hi / time / image / video / audio / voice / contact).
    """

    def __init__(self, json):
        # `json` is the webhook payload dict; its 'data' entry is the message.
        self.json = json
        self.dict_messages = json['data']
        # NOTE(review): instance id and token are template placeholders —
        # they must be substituted with real credentials before deployment.
        self.ultraAPIUrl = 'https://api.ultramsg.com/{{instance_id}}/'
        self.token = '{{token}}'

    def send_requests(self, type, data):
        # POST `data` as JSON to the endpoint named by `type` (e.g. 'messages/chat')
        # and return the decoded JSON response.
        url = f"{self.ultraAPIUrl}{type}?token={self.token}"
        headers = {'Content-type': 'application/json'}
        answer = requests.post(url, data=json.dumps(data), headers=headers)
        return answer.json()

    def send_message(self, chatID, text):
        """Send a plain text message to chatID."""
        data = {"to" : chatID,
                "body" : text}
        answer = self.send_requests('messages/chat', data)
        return answer

    def send_image(self, chatID):
        """Send a fixed sample image to chatID."""
        data = {"to" : chatID,
                "image" : "https://file-example.s3-accelerate.amazonaws.com/images/test.jpeg"}
        answer = self.send_requests('messages/image', data)
        return answer

    def send_video(self, chatID):
        """Send a fixed sample video to chatID."""
        data = {"to" : chatID,
                "video" : "https://file-example.s3-accelerate.amazonaws.com/video/test.mp4"}
        answer = self.send_requests('messages/video', data)
        return answer

    def send_audio(self, chatID):
        """Send a fixed sample audio file to chatID."""
        data = {"to" : chatID,
                "audio" : "https://file-example.s3-accelerate.amazonaws.com/audio/2.mp3"}
        answer = self.send_requests('messages/audio', data)
        return answer

    def send_voice(self, chatID):
        """Send a fixed sample OGG voice note to chatID."""
        data = {"to" : chatID,
                "audio" : "https://file-example.s3-accelerate.amazonaws.com/voice/oog_example.ogg"}
        answer = self.send_requests('messages/voice', data)
        return answer

    def send_contact(self, chatID):
        """Send a fixed sample contact card to chatID."""
        data = {"to" : chatID,
                "contact" : "<EMAIL>"}
        answer = self.send_requests('messages/contact', data)
        return answer

    def time(self, chatID):
        """Reply with the server's current local time."""
        t = datetime.datetime.now()
        time = t.strftime('%Y-%m-%d %H:%M:%S')
        return self.send_message(chatID, time)

    def welcome(self,chatID, noWelcome = False):
        # noWelcome=True means the previous command was unrecognized, so the
        # reply is the command-help text instead of a greeting.
        welcome_string = ''
        if (noWelcome == False):
            welcome_string = "Hi , welcome to WhatsApp chatbot using Python\n"
        else:
            welcome_string = """wrong command
Please type one of these commands:
*hi* : Saluting
*time* : show server time
*image* : I will send you a picture
*video* : I will send you a Video
*audio* : I will send you a audio file
*voice* : I will send you a ppt audio recording
*contact* : I will send you a contact
        """
        return self.send_message(chatID, welcome_string)

    def Processingـincomingـmessages(self):
        # Dispatch on the first word of the incoming message body.
        # Messages sent by the bot itself ('fromMe') are ignored.
        if self.dict_messages != []:
            message =self.dict_messages
            text = message['body'].split()
            if not message['fromMe']:
                chatID = message['from']
                if text[0].lower() == 'hi':
                    return self.welcome(chatID)
                elif text[0].lower() == 'time':
                    return self.time(chatID)
                elif text[0].lower() == 'image':
                    return self.send_image(chatID)
                elif text[0].lower() == 'video':
                    return self.send_video(chatID)
                elif text[0].lower() == 'audio':
                    return self.send_audio(chatID)
                elif text[0].lower() == 'voice':
                    return self.send_voice(chatID)
                elif text[0].lower() == 'contact':
                    return self.send_contact(chatID)
                else:
                    return self.welcome(chatID, True)
            else: return 'NoCommand'
| StarcoderdataPython |
239918 | from setuptools import setup
setup(
    name='removestyles',
    version='0.0.1',
    py_modules=['removestyles'],
    install_requires=['ass>=0.5.1'],
    entry_points={
        # Installs a `removestyles` console command that calls removestyles.main().
        "console_scripts": ["removestyles=removestyles:main"]
    }
)
| StarcoderdataPython |
12856837 | from slackclient import SlackClient
import requests
import os
from Config import slack_env_var_token, slack_username
"""
These functions take care of sending slack messages and emails
"""
def slack_chat_messenger(message):
    """Send `message` as a Slack DM to the user configured in Config.

    The API token is read from the environment variable named by
    slack_env_var_token; the recipient's IM channel is located by scanning
    the im.list response for slack_username.
    """
    # NEVER LEAVE THE TOKEN IN YOUR CODE ON GITHUB, EVERYBODY WOULD HAVE ACCESS TO THE CHANNEL!
    slack_token = os.environ.get(slack_env_var_token)
    slack_client = SlackClient(slack_token)
    api_call = slack_client.api_call("im.list")
    user_slack_id = slack_username
    # You should either know the user_slack_id to send a direct msg to the user.
    # If no IM matches user_slack_id, nothing is sent (silently).
    if api_call.get('ok'):
        for im in api_call.get("ims"):
            if im.get("user") == user_slack_id:
                im_channel = im.get("id")
                slack_client.api_call("chat.postMessage", channel=im_channel, text=message, as_user=False)
def slack_chat_attachments(filepath):
    """Upload the PNG at `filepath` + '.png' to Slack via files.upload.

    `filepath` is passed WITHOUT the '.png' extension; it is appended here.
    Fixes over the original: the destination channel now comes from the
    Config (`slack_username`) instead of a hard-coded handle, matching
    slack_chat_messenger/upload_file, and the file handle is closed via a
    context manager instead of being leaked.
    """
    slack_chat_messenger('Trying to send you {}'.format(filepath))
    slack_token = os.environ.get(slack_env_var_token)
    png_path = filepath + '.png'
    with open(png_path, 'rb') as fh:
        my_file = {
            'file': (png_path, fh, 'image/png', {
                'Expires': '0'
            })
        }
        payload = {
            "filename": png_path,
            "token": slack_token,
            # Use the configured recipient for consistency with the rest of
            # this module (was a hard-coded '@Fede').
            "channels": [slack_username],
            "media": my_file
        }
        r = requests.post("https://slack.com/api/files.upload", params=payload, files=my_file)
    print(r.text)
def upload_file( filepath ):
    """Upload file to channel

    Note:
        URLs can be constructed from:
        https://api.slack.com/methods/files.upload/test

    Sends a heads-up DM first, then POSTs the file to files.upload with the
    configured recipient.  The HTTP response is printed, not returned.
    """
    slack_chat_messenger('Trying to send you {}'.format(filepath))
    slack_token = os.environ.get(slack_env_var_token)
    data = {}
    data['token'] = slack_token
    data['file'] = filepath
    data['filename'] = filepath
    data['channels'] = [slack_username]
    data['display_as_bot'] = True
    filepath = data['file']
    # NOTE(review): the open file handle is never closed here.
    files = {
        'content': (filepath, open(filepath, 'rb'), 'image/png', {
            'Expires': '0'
        })
    }
    # The files dict is also embedded in the form data under 'media'.
    data['media'] = files
    response = requests.post(
        url='https://slack.com/api/files.upload',
        data=data,
        headers={'Accept': 'application/json'},
        files=files)
    print(response.text)
def send_email_attachments(filename, filepath):
    """Email the PNG at `filepath` + '.png' via Gmail SMTP, subject = filename.

    NOTE(review): the sender/recipient are sanitized placeholders and the
    SMTP login password is empty — real credentials must come from
    configuration, never from source code.
    """
    import smtplib
    from email.mime.image import MIMEImage
    from email.mime.text import MIMEText
    from email.mime.multipart import MIMEMultipart
    # Create the container (outer) email message.
    msg = MIMEMultipart()
    msg['Subject'] = filename
    # me == the sender's email address
    # family = the list of all recipients' email addresses
    msg['From'] = '<EMAIL>'
    msg['To'] = '<EMAIL>'
    body = "Analysis results"
    msg.attach(MIMEText(body, 'plain'))
    with open(filepath+'.png', 'rb') as fp:
        img = MIMEImage(fp.read())
    msg.attach(img)
    # Send the email via our own SMTP server.
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.ehlo()
    server.starttls()
    server.login('<EMAIL>', '')
    server.sendmail('<EMAIL>', '<EMAIL>', msg.as_string())
    server.quit()
| StarcoderdataPython |
11363176 | <reponame>bikramtuladhar/covid-19-procurement-explorer-admin
# Generated by Django 3.1.2 on 2020-12-10 08:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.1.2, 2020-12-10).

    Adds Buyer and GoodsServicesCategory models, a GoodsServices line-item
    model linked to Tender, relaxes several Tender/Supplier fields to
    blank/null, and links Tender to Buyer.  Do not edit by hand — create a
    new migration for further schema changes.
    """

    dependencies = [
        ("country", "0006_tender_no_of_bidders"),
    ]

    operations = [
        migrations.CreateModel(
            name="Buyer",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("buyer_id", models.CharField(max_length=50, verbose_name="Buyer ID")),
                ("buyer_name", models.CharField(blank=True, max_length=250, null=True, verbose_name="Buyer name")),
                (
                    "buyer_address",
                    models.CharField(blank=True, max_length=250, null=True, verbose_name="Buyer address"),
                ),
            ],
        ),
        migrations.CreateModel(
            name="GoodsServicesCategory",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("category_name", models.CharField(max_length=50, unique=True, verbose_name="Category name")),
                ("category_desc", models.TextField(blank=True, null=True, verbose_name="Category description")),
            ],
        ),
        migrations.AddField(
            model_name="supplier",
            name="supplier_address",
            field=models.CharField(blank=True, max_length=250, null=True, verbose_name="Supplier address"),
        ),
        migrations.AlterField(
            model_name="supplier",
            name="supplier_id",
            field=models.CharField(default="NULL", max_length=50, verbose_name="Supplier ID"),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="supplier",
            name="supplier_name",
            field=models.CharField(blank=True, max_length=250, null=True, verbose_name="Supplier name"),
        ),
        migrations.AlterField(
            model_name="tender",
            name="contract_desc",
            field=models.TextField(blank=True, null=True, verbose_name="Contract description"),
        ),
        migrations.AlterField(
            model_name="tender",
            name="contract_title",
            field=models.TextField(blank=True, null=True, verbose_name="Contract title"),
        ),
        migrations.AlterField(
            model_name="tender",
            name="contract_value_local",
            field=models.FloatField(blank=True, null=True, verbose_name="Contract value local"),
        ),
        migrations.AlterField(
            model_name="tender",
            name="contract_value_usd",
            field=models.FloatField(blank=True, null=True, verbose_name="Contract value USD"),
        ),
        migrations.AlterField(
            model_name="tender",
            name="data_source",
            field=models.CharField(blank=True, max_length=250, null=True, verbose_name="Data source"),
        ),
        migrations.AlterField(
            model_name="tender",
            name="link_to_contract",
            field=models.CharField(blank=True, max_length=250, null=True, verbose_name="Link to contract"),
        ),
        migrations.AlterField(
            model_name="tender",
            name="link_to_tender",
            field=models.CharField(blank=True, max_length=250, null=True, verbose_name="Link to tender"),
        ),
        migrations.AlterField(
            model_name="tender",
            name="supplier",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="tenders",
                to="country.supplier",
            ),
        ),
        migrations.CreateModel(
            name="GoodsServices",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                (
                    "classification_code",
                    models.CharField(blank=True, max_length=25, null=True, verbose_name="Classification code"),
                ),
                ("no_of_bidders", models.BigIntegerField(blank=True, null=True, verbose_name="Number of bidders")),
                ("contract_title", models.TextField(blank=True, null=True, verbose_name="Contract title")),
                ("contract_desc", models.TextField(blank=True, null=True, verbose_name="Contract description")),
                ("tender_value_local", models.FloatField(blank=True, null=True, verbose_name="Tender value local")),
                ("tender_value_usd", models.FloatField(blank=True, null=True, verbose_name="Tender value USD")),
                ("award_value_local", models.FloatField(blank=True, null=True, verbose_name="Award value local")),
                ("award_value_usd", models.FloatField(blank=True, null=True, verbose_name="Award value USD")),
                (
                    "contract_value_local",
                    models.FloatField(blank=True, null=True, verbose_name="Contract value local"),
                ),
                ("contract_value_usd", models.FloatField(blank=True, null=True, verbose_name="Contract value USD")),
                (
                    "contract",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="goods_services",
                        to="country.tender",
                    ),
                ),
                (
                    "goods_services_category",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="goods_services",
                        to="country.goodsservicescategory",
                    ),
                ),
            ],
        ),
        migrations.AddField(
            model_name="tender",
            name="buyer",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="tenders",
                to="country.buyer",
            ),
        ),
    ]
| StarcoderdataPython |
3453547 | <filename>scripts/mnist.py
#!/usr/bin/env python
#
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
"""Implement attention sampling for classifying MNIST digits."""
import argparse
import json
from os import path
from keras import backend as K
from keras.callbacks import Callback, ModelCheckpoint
from keras.datasets import mnist
from keras.layers import Input, Conv2D, AveragePooling2D, GlobalMaxPooling2D, \
Dense
from keras.models import Model, Sequential
from keras.optimizers import SGD, Adam
from keras.utils import Sequence
import numpy as np
from skimage.io import imsave
from ats.core import attention_sampling
from ats.utils.layers import L2Normalize, SampleSoftmax, ResizeImages, \
TotalReshape
from ats.utils.regularizers import multinomial_entropy
from ats.utils.training import Batcher
class MNIST(Sequence):
    """Load a Megapixel MNIST dataset. See make_mnist.py.

    Each stored sample is a sparse triple: (low-res (indices, values),
    high-res (indices, values), label) — see __getitem__, which densifies
    the two views on the fly.
    """

    def __init__(self, dataset_dir, train=True):
        # Dataset metadata (counts, dimensions, down-scale factor) written by make_mnist.py.
        with open(path.join(dataset_dir, "parameters.json")) as f:
            self.parameters = json.load(f)
        filename = "train.npy" if train else "test.npy"
        N = self.parameters["n_train" if train else "n_test"]  # NOTE(review): unused
        W = self.parameters["width"]
        H = self.parameters["height"]
        scale = self.parameters["scale"]
        # Single-channel shapes for the full- and down-scaled views.
        self._high_shape = (H, W, 1)
        self._low_shape = (int(scale*H), int(scale*W), 1)
        self._data = np.load(path.join(dataset_dir, filename))

    def __len__(self):
        return len(self._data)

    def __getitem__(self, i):
        if i >= len(self):
            raise IndexError()

        # Placeholders (dense, flattened)
        x_low = np.zeros(self._low_shape, dtype=np.float32).ravel()
        x_high = np.zeros(self._high_shape, dtype=np.float32).ravel()

        # Fill the sparse representations: data[j][0] are flat indices,
        # data[j][1] the corresponding pixel values.
        data = self._data[i]
        x_low[data[0][0]] = data[0][1]
        x_high[data[1][0]] = data[1][1]

        # Reshape to their final shape
        x_low = x_low.reshape(self._low_shape)
        x_high = x_high.reshape(self._high_shape)

        return [x_low, x_high], data[2]
class AttentionSaver(Callback):
    """Keras callback that saves the attention map and sampled patches for a
    fixed probe batch at the end of every epoch."""

    def __init__(self, output, att_model, data):
        # Filename templates, formatted with the epoch (and patch) index.
        self._att_path = path.join(output, "attention_{:03d}.png")
        self._patches_path = path.join(output, "patches_{:03d}_{:03d}.png")
        self._att_model = att_model
        # Keep the dataset's first batch as a fixed input to visualize.
        (self._x, self._x_high), self._y = data[0]
        self._imsave(
            path.join(output, "image.png"),
            self._x[0, :, :, 0]
        )

    def on_epoch_end(self, e, logs):
        att, patches = self._att_model.predict([self._x, self._x_high])
        self._imsave(self._att_path.format(e), att[0])
        # Also keep the raw attention array next to the PNG.
        np.save(self._att_path.format(e)[:-4], att[0])
        for i, p in enumerate(patches[0]):
            self._imsave(self._patches_path.format(e, i), p[:, :, 0])

    def _imsave(self, filepath, x):
        # Scale to the uint16 range before writing (assumes x in [0, 1] — TODO confirm).
        x = (x*65535).astype(np.uint16)
        imsave(filepath, x, check_contrast=False)
def get_model(outputs, width, height, scale, n_patches, patch_size, reg):
    """Build the attention-sampling classifier.

    Returns (classifier, visualization_model): the first maps
    [x_low, x_high] -> class probabilities; the second exposes the
    attention map and the sampled patches for the same inputs.
    """
    # Define the shapes
    shape_high = (height, width, 1)
    shape_low = (int(height*scale), int(width*scale), 1)

    # Make the attention and feature models
    attention = Sequential([
        Conv2D(8, kernel_size=3, activation="tanh", padding="same",
               input_shape=shape_low),
        Conv2D(8, kernel_size=3, activation="tanh", padding="same"),
        Conv2D(1, kernel_size=3, padding="same"),
        SampleSoftmax(squeeze_channels=True, smooth=1e-5)
    ])
    feature = Sequential([
        Conv2D(32, kernel_size=7, activation="relu", input_shape=shape_high),
        Conv2D(32, kernel_size=3, activation="relu"),
        Conv2D(32, kernel_size=3, activation="relu"),
        Conv2D(32, kernel_size=3, activation="relu"),
        GlobalMaxPooling2D(),
        L2Normalize()
    ])

    # Let's build the attention sampling network
    x_low = Input(shape=shape_low)
    x_high = Input(shape=shape_high)
    features, attention, patches = attention_sampling(
        attention,
        feature,
        patch_size,
        n_patches,
        replace=False,
        attention_regularizer=multinomial_entropy(reg)
    )([x_low, x_high])
    y = Dense(outputs, activation="softmax")(features)

    return (
        Model(inputs=[x_low, x_high], outputs=[y]),
        Model(inputs=[x_low, x_high], outputs=[attention, patches])
    )
def get_optimizer(args):
    """Build the Keras optimizer named by args.optimizer ("sgd" or "adam")."""
    name = args.optimizer
    if name == "adam":
        return Adam(lr=args.lr, clipnorm=args.clipnorm)
    if name == "sgd":
        return SGD(lr=args.lr, momentum=args.momentum, clipnorm=args.clipnorm)
    raise ValueError("Invalid optimizer {}".format(name))
def main(argv):
    """Parse CLI arguments, build the attention-sampling model and train it
    on the Megapixel MNIST dataset produced by make_mnist.py."""
    parser = argparse.ArgumentParser(
        description=("Train a model with attention sampling on the "
                     "artificial mnist dataset")
    )
    parser.add_argument(
        "dataset",
        help="The directory that contains the dataset (see make_mnist.py)"
    )
    parser.add_argument(
        "output",
        help="An output directory"
    )
    parser.add_argument(
        "--optimizer",
        choices=["sgd", "adam"],
        default="adam",
        help="Choose the optimizer for Q1"
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.001,
        help="Set the optimizer's learning rate"
    )
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.9,
        help="Choose the momentum for the optimizer"
    )
    parser.add_argument(
        "--clipnorm",
        type=float,
        default=1,
        help=("Clip the gradient norm to avoid exploding gradients "
              "towards the end of convergence")
    )
    parser.add_argument(
        "--patch_size",
        type=lambda x: tuple(int(xi) for xi in x.split("x")),  # e.g. "50x50" -> (50, 50)
        default="50x50",
        help="Choose the size of the patch to extract from the high resolution"
    )
    parser.add_argument(
        "--n_patches",
        type=int,
        default=10,
        help="How many patches to sample"
    )
    parser.add_argument(
        "--regularizer_strength",
        type=float,
        default=0.0001,
        help="How strong should the regularization be for the attention"
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=128,
        help="Choose the batch size for SGD"
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=500,
        help="How many epochs to train for"
    )

    args = parser.parse_args(argv)

    # Load the data (Batcher wraps the Sequence datasets into mini-batches).
    training_dataset = MNIST(args.dataset)
    test_dataset = MNIST(args.dataset, train=False)
    training_batched = Batcher(training_dataset, args.batch_size)
    test_batched = Batcher(test_dataset, args.batch_size)
    print("Loaded dataset with the following parameters")
    print(json.dumps(training_dataset.parameters, indent=4))

    model, att_model = get_model(
        outputs=10,
        width=training_dataset.parameters["width"],
        height=training_dataset.parameters["height"],
        scale=training_dataset.parameters["scale"],
        n_patches=args.n_patches,
        patch_size=args.patch_size,
        reg=args.regularizer_strength
    )
    model.compile(
        loss="categorical_crossentropy",
        optimizer=get_optimizer(args),
        metrics=["accuracy", "categorical_crossentropy"]
    )
    model.summary()
    # Save attention visualizations and model weights every epoch.
    callbacks = [
        AttentionSaver(args.output, att_model, training_batched),
        ModelCheckpoint(
            path.join(args.output, "weights.{epoch:02d}.h5"),
            save_weights_only=True
        )
    ]
    model.fit_generator(
        training_batched,
        validation_data=test_batched,
        epochs=args.epochs,
        callbacks=callbacks
    )

    loss, accuracy, ce = model.evaluate_generator(test_batched, verbose=1)
    print("Test loss: {}".format(ce))
    print("Test error: {}".format(1-accuracy))
if __name__ == "__main__":
main(None)
| StarcoderdataPython |
3375781 | <filename>Exercicios/Extras/U.py
# Draw the letter "U" on a 7x7 grid: two vertical bars (columns 1 and 5)
# joined by a base (columns 2-4 of the last row).
for row in range(7):
    cells = []
    for col in range(7):
        on_side = col in (1, 5) and row != 6
        on_base = row == 6 and 2 <= col <= 4
        cells.append('*' if on_side or on_base else ' ')
    print(''.join(cells))
| StarcoderdataPython |
314356 | <reponame>vincentzlt/textprep<gh_stars>10-100
#!/usr/bin/env python
# coding: utf-8
import argparse as ap
import collections as cl
import re
import itertools as it
import json
import os
import sys
import fileinput as fi
def _str2bool(v):
    """Parse a CLI boolean flag (case-insensitive yes/no style strings)."""
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ap.ArgumentTypeError('Boolean value expected.')
def main(args):
    # Load the token -> decomposition mapping produced by the decompose step.
    vocab2ideos = json.loads(open(args.vocab_decomp).read())
    if not args.reverse:
        mapping = vocab2ideos
    else:
        # Compose direction: invert the mapping (assumes values are unique — TODO confirm).
        mapping = {v: k for k, v in vocab2ideos.items()}
    # Write to the given output file, or stdout when no output was supplied.
    fout = open(args.output, 'wt') if args.output else sys.stdout
    # Translate each line token-by-token; a token missing from the mapping raises KeyError.
    for l in fi.input(args.input):
        l_ = ' '.join([mapping[w] for w in l.strip().split()]) + '\n'
        fout.write(l_)
    if fout is not sys.stdout:
        fout.close()
if __name__ == '__main__':
    # CLI: decompose the input text (default) or compose it back (--reverse true).
    decomp_parser = ap.ArgumentParser()
    decomp_parser.add_argument('input', help='the input fname.')
    decomp_parser.add_argument('output', nargs='?', help='the output fname.')
    decomp_parser.add_argument(
        '--reverse',
        default=False,
        type=_str2bool,
        help=
        'whether to reverse process the input file. If reverse: compose back'
        ' to normal text file from input fname and vocab fname. Else: do the '
        'normal decomposition.')
    decomp_parser.add_argument(
        '--vocab_decomp',
        type=str,
        help='the vocab_decomp fname. in decomp process, vocab file will be '
        'generated automatically; in comp process, vocab file must exist to '
        'be read from.')
    args = decomp_parser.parse_args()
    print(args)
    main(args)
| StarcoderdataPython |
6495856 | """CSC110 Fall 2021 Prep 9: Programming Exercises
Instructions (READ THIS FIRST!)
===============================
This Python module contains several function headers and descriptions.
We have marked each place you need to fill in with the word "TODO".
As you complete your work in this file, delete each TODO comment.
You do not need to include doctests for this prep, though we strongly encourage you
to check your work carefully!
Copyright and Usage Information
===============================
This file is provided solely for the personal and private use of students
taking CSC110 at the University of Toronto St. George campus. All forms of
distribution of this code, whether as given or with any changes, are
expressly prohibited. For more information on copyright for CSC110 materials,
please consult our Course Syllabus.
This file is Copyright (c) 2021 <NAME>, <NAME>, and <NAME>.
"""
from dataclasses import dataclass
import math
####################################################################################################
# Part 1
#
# Most websites where you can create an account now ask you for a "strong password". The definition
# of strong password varies. In Part 1, you will implement 1 definition of a strong password and
# get some practice with functions that mutate their input.
#
# For those curious, these definitions of a "strong password" don't actually seem to be based on
# evidence. See: https://nakedsecurity.sophos.com/2014/10/24/do-we-really-need-strong-passwords/
#
# If you are still coming up with your own passwords (or, worse, reusing the same password for more
# than one account), please look into getting a password manager.
####################################################################################################
def is_strong_password(password: str) -> bool:
    """Return whether password is a strong password.

    A strong password has at least 6 characters, contains at least one
    lowercase letter, at least one uppercase letter, and at least one digit
    (ASCII ranges only, matching the original chr/ord-based checks).

    >>> is_strong_password('Abc123')
    True
    >>> is_strong_password('abc123')
    False
    >>> is_strong_password('Ab1')
    False
    """
    return (
        len(password) >= 6
        and any('a' <= ch <= 'z' for ch in password)
        and any('A' <= ch <= 'Z' for ch in password)
        and any('0' <= ch <= '9' for ch in password)
    )
def remove_weak_passwords(passwords: list[str]) -> list[str]:
    """Remove and return the weak passwords in the given list of passwords.

    A weak password is a password that is not strong.

    This function both mutates the given list (to remove the weak passwords)
    and returns a new list (the weak passwords that were removed).
    """
    weak_passwords = [p for p in passwords if not is_strong_password(p)]
    # Slice assignment mutates the SAME list object in place (unlike plain
    # rebinding), and is O(n) versus the O(n^2) remove-one-at-a-time loop.
    passwords[:] = [p for p in passwords if is_strong_password(p)]
    return weak_passwords
def test_mutation() -> None:
    """Check that remove_weak_passwords removes weak entries from its argument.

    The input list deliberately contains at least one weak password.
    """
    sample = ['<PASSWORD>', '<PASSWORD>']
    remove_weak_passwords(sample)
    assert sample == ['<PASSWORD>']
def test_return() -> None:
    """Check that remove_weak_passwords returns the weak passwords it removed.

    The input list deliberately contains at least one weak password.
    """
    sample = ['<PASSWORD>', '<PASSWORD>']
    removed = remove_weak_passwords(sample)
    assert removed == ['<PASSWORD>']
####################################################################################################
# Part 2
#
# Ideally, the passwords we use when we create our accounts are not stored in plaintext. Otherwise,
# any hacker who finds their way into a company's server has just gained access to every password
# stored on that server, with no further hacking required. Here, you will implement some functions
# to encrypt passwords.
#
# Further reading (not required for this prep)
# ============================================
#
# Unfortunately, sometimes passwords are not stored securely.
# See: https://www.howtogeek.com/434930/why-are-companies-still-storing-passwords-in-plain-text/
#
# You may also find it interesting that encryption may not be the best method for storing passwords.
# Do a quick search for: hashing vs. encryption. Here are some relevant articles:
# - https://cheapsslsecurity.com/blog/hashing-vs-encryption/
# - https://cybernews.com/security/hashing-vs-encryption/
# - https://www.maketecheasier.com/password-hashing-encryption/
####################################################################################################
def divides(d: int, n: int) -> bool:
    """Return whether d divides n.

    By convention, 0 divides only 0.
    """
    if d == 0:
        return n == 0
    return n % d == 0
def is_prime(p: int) -> bool:
    """Return whether p is a prime number.

    Only candidate divisors up to floor(sqrt(p)) need to be checked.
    """
    upper = math.floor(math.sqrt(p))
    return p > 1 and not any(divides(d, p) for d in range(2, upper + 1))
def phi(n: int) -> int:
    """Return the count of integers in {1, ..., n} coprime with n
    (Euler's totient function).

    Preconditions:
        - n >= 1

    >>> phi(5)
    4
    >>> phi(15)
    8
    >>> phi(1)
    1
    """
    coprime_count = 0
    for candidate in range(1, n + 1):
        if math.gcd(candidate, n) == 1:
            coprime_count += 1
    return coprime_count
@dataclass
class PublicKey:
    """A public key for the RSA cryptosystem.

    This data class is another way of representing an RSA public key,
    rather than using tuples.

    Representation Invariants:
        - 1 < n
        - 2 <= e <= phi(n) - 1
        - math.gcd(e, phi(n)) == 1
    """
    # Bug fix: the last invariant was written `math.gcd(e, phi(n)) = 1`
    # (assignment, not comparison) -- not a valid expression, which matters
    # because python_ta's contract checker evaluates these invariants.
    n: int  # the RSA modulus (product of two distinct primes)
    e: int  # the public (encryption) exponent
@dataclass
class PrivateKey:
    """A private key for the RSA cryptosystem.

    Note: since p and q are distinct primes, (p - 1) * (q - 1) equals
    phi(p * q), so invariant 3 below matches "d is between 2 and
    phi(p * q) - 1, inclusive".

    Representation Invariants:
    - is_prime(p)
    - is_prime(q)
    - p != q
    - 2 <= d <= (q - 1) * (p - 1) - 1
    """
    p: int  # first prime factor of the modulus
    q: int  # second prime factor of the modulus
    d: int  # the private (decryption) exponent
def encrypt_password(public_key: PublicKey, password: str) -> str:
    """Return the password encrypted with public_key using the RSA cryptosystem.

    Each character is encrypted independently: its code point is raised to
    the public exponent modulo the public modulus, and the result is mapped
    back to a character.
    """
    n, e = public_key.n, public_key.e
    return ''.join(chr(pow(ord(symbol), e, n)) for symbol in password)
def decrypt_password(private_key: PrivateKey, encrypted: str) -> str:
    """Return the decryption of the given encrypted password using private_key
    and the RSA cryptosystem.

    Each character is decrypted independently by raising its code point to
    the private exponent modulo p * q.
    """
    modulus = private_key.p * private_key.q
    exponent = private_key.d
    return ''.join(chr(pow(ord(symbol), exponent, modulus)) for symbol in encrypted)
def encrypt_passwords(public_key: PublicKey, passwords: list[str]) -> None:
    """Encrypt every password in passwords in place using public_key and RSA.

    This function mutates passwords and returns None. The encrypted
    passwords appear in the same positions as the originals.
    """
    for index, plain in enumerate(passwords):
        passwords[index] = encrypt_password(public_key, plain)
if __name__ == '__main__':
    # Run the course's standard checks: PythonTA linting, runtime checking of
    # the docstring representation invariants, doctests, and the pytest suite.
    import python_ta
    python_ta.check_all(config={
        'max-line-length': 100,
        'extra-imports': ['math', 'dataclasses', 'python_ta.contracts'],
        'disable': ['R1705', 'C0200']
    })
    import python_ta.contracts
    python_ta.contracts.DEBUG_CONTRACTS = False
    python_ta.contracts.check_all_contracts()
    import doctest
    doctest.testmod(verbose=True)
    import pytest
    pytest.main(['prep9.py', '-v'])
| StarcoderdataPython |
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Return the index of target in the sorted list nums, or -1 if absent.

        Classic iterative binary search: O(log n) time, O(1) extra space.
        The midpoint is computed as lo + (hi - lo) // 2 to avoid overflow in
        languages with fixed-width integers (harmless in Python, kept for form).
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            if nums[mid] == target:
                return mid
            if nums[mid] < target:
                lo = mid + 1
            else:
                hi = mid - 1
        return -1
| StarcoderdataPython |
3568064 | <reponame>iguinn/pygama
import numpy as np
from numba import guvectorize
from pygama.dsp.errors import DSPFatal
@guvectorize(["void(float32[:], float32, float32[:], float32[:])",
              "void(float64[:], float64, float64[:], float64[:])"],
             "(n),()->(),()", nopython=True, cache=True)
def saturation(w_in, bit_depth_in, n_lo_out, n_hi_out):
    """
    Count the number of samples in the waveform that are
    saturated at the minimum and maximum possible values based
    on the bit depth.

    Parameters
    ----------
    w_in : array-like
           The input waveform
    bit_depth_in: int
                  The bit depth of the analog-to-digital converter
    n_lo_out : int
               The output number of samples at the minimum
    n_hi_out : int
               The output number of samples at the maximum

    Processing Chain Example
    ------------------------
    "sat_lo, sat_hi": {
        "function": "saturation",
        "module": "pygama.dsp.processors",
        "args": ["waveform", "16", "sat_lo", "sat_hi"],
        "unit": "ADC",
        "prereqs": ["waveform"]
    }
    """
    # Default the outputs to NaN so downstream code can tell that this
    # processor bailed out early (NaN inputs) from a genuine zero count.
    n_lo_out[0] = np.nan
    n_hi_out[0] = np.nan

    # Propagate NaN inputs silently instead of raising.
    if np.isnan(w_in).any() or np.isnan(bit_depth_in):
        return

    # The bit depth arrives as a float (guvectorize signature); validate it.
    if not np.floor(bit_depth_in) == bit_depth_in:
        raise DSPFatal('The bit depth is not an integer')

    if bit_depth_in <= 0:
        raise DSPFatal('The bit depth is not positive')

    n_lo_out[0] = 0
    n_hi_out[0] = 0

    for i in range(0, len(w_in), 1):
        # Low saturation: samples pinned at ADC code 0.
        if w_in[i] == 0:
            n_lo_out[0] += 1
        # NOTE(review): a B-bit ADC's maximum code is 2**B - 1, but this
        # compares against 2**B exactly; confirm the intended convention
        # (e.g. an offset/overflow marker) before changing it.
        elif w_in[i] == np.power(2, int(bit_depth_in)):
            n_hi_out[0] += 1
| StarcoderdataPython |
4838879 | # Generated by Django 4.0 on 2022-02-05 13:08
from django.db import migrations
class Migration(migrations.Migration):
    """Redefine BookInstance's custom model permissions.

    Replaces the model's permission set with `can_mark_returned` and
    `can_mark_on_loan` via AlterModelOptions. This changes model metadata
    only; no database schema change is performed.
    """

    dependencies = [
        ('catalog', '0002_bookinstance_borrower_alter_author_date_of_birth_and_more'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='bookinstance',
            options={'permissions': [('can_mark_returned', 'Set book as returned'), ('can_mark_on_loan', 'Set book as on loan')]},
        ),
    ]
| StarcoderdataPython |
3367937 | <gh_stars>0
"""
This creates Figure 4. Gene expression R2X with flattened matrix dimension reconstruction.
"""
import seaborn as sns
from .figureCommon import subplotLabel, getSetup
from tensorpack import perform_CMTF
from ..dataHelpers import form_tensor, proteinNames
def makeFigure():
    """Create the figure: a single labeled 8x12 axis holding the per-component
    protein factor distribution, and return the figure object."""
    axes, fig = getSetup((8, 12), (1, 1))
    # Label the subplot before drawing into it.
    subplotLabel(axes)
    makeProteinListDistribution(axes[0])
    return fig
def makeProteinListDistribution(ax):
    """Scatter the protein factor values per component, labelling outliers.

    For each of the five CMTF components, proteins whose factor value lies
    outside [-0.15, 0.15] are plotted opaquely and labelled with the protein
    name (labels alternate left/right to reduce overlap); all remaining
    proteins are plotted faintly (alpha=.05).

    Bug fixes relative to the original:
    - outlier labels were drawn with ``proteinList[line]`` (an index into the
      full protein list by outlier position), not the names of the outlier
      proteins themselves;
    - the per-component name counters only advanced on outliers, so even the
      collected name lists mapped to the wrong proteins;
    - the second-pass "inlier" conditions used ``or`` (always true) where
      ``and`` was intended, re-plotting every point at low alpha;
    - the five near-identical copies of the logic are collapsed into a loop.
    """
    tensor, rTensor, _, _ = form_tensor()
    results = perform_CMTF(tensor, rTensor)
    proteinFactors = results[1][0]
    proteinList = proteinNames()

    threshold = 0.15
    colors = ['blue', 'orange', 'yellow', 'green', 'red']

    for comp, color in enumerate(colors):
        label = "Component {}".format(comp + 1)
        outlier_vals = []
        outlier_names = []
        inlier_vals = []
        # Partition this component's values into outliers (to be labelled)
        # and inliers (drawn faintly), keeping row index -> protein name.
        for row, compVals in enumerate(proteinFactors):
            value = compVals[comp]
            if value < -threshold or value > threshold:
                outlier_vals.append(value)
                outlier_names.append(proteinList[row])
            else:
                inlier_vals.append(value)

        sns.scatterplot(x=[label] * len(outlier_vals), y=outlier_vals, ax=ax, color=color)
        sns.scatterplot(x=[label] * len(inlier_vals), y=inlier_vals, ax=ax, color=color, alpha=.05)

        # Alternate label alignment to reduce text overlap.
        for i, (value, name) in enumerate(zip(outlier_vals, outlier_names)):
            side = 'right' if i % 2 == 0 else 'left'
            ax.text(label, value, name, horizontalalignment=side, size=6, color='black', weight='semibold')

    ax.set_title("Component Values for Proteins")
    ax.set(xlabel='Components', ylabel='Component Values')
| StarcoderdataPython |
1778166 | <gh_stars>0
from typing import List
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return a list whose i-th entry is the product of all of nums
        except nums[i], without using division.

        Two O(n) passes: a forward pass fills each slot with the product of
        everything to its left, then a backward pass multiplies in the
        product of everything to its right.
        """
        size = len(nums)
        answer = [1] * size
        prefix = 1
        for i in range(size):
            answer[i] = prefix
            prefix *= nums[i]
        suffix = 1
        for i in reversed(range(size)):
            answer[i] *= suffix
            suffix *= nums[i]
        return answer
if __name__ == "__main__":
    # Demo: expected outputs are [60, 40, 30, 24] and [24, 12, 8, 6].
    solution = Solution()
    print(solution.productExceptSelf([2,3,4,5]))
    print(solution.productExceptSelf([1,2,3,4]))
| StarcoderdataPython |
6596076 | <gh_stars>10-100
#!/usr/bin/env python
__author__ = "<NAME>"
import re
import logging
import os
import shutil
from genomic_tools_lib import Logging, Utilities
def run(args):
    """Collapse matching sub-folders of args.input_folder into grouped output folders.

    Folders whose names match the ``args.rule`` regex are grouped by the
    regex's first capture group; the files of each group are copied (or
    moved, when args.move is set) into a folder named after the group under
    args.output_folder. Folders listed in args.exclude are skipped.
    """
    # In non-reentrant mode, refuse to touch a pre-existing output folder.
    if not args.reentrant and os.path.exists(args.output_folder):
        logging.info("Output path exists. Nope.")
        return
    Utilities.maybe_create_folder(args.output_folder)

    logging.info("Checking input folder")
    pattern = re.compile(args.rule)
    folders = [entry for entry in sorted(os.listdir(args.input_folder)) if pattern.search(entry)]
    if args.exclude:
        excluded = {entry for entry in args.exclude}
        folders = [entry for entry in folders if entry not in excluded]

    # Group absolute input folder paths by the regex's first capture group.
    groups = {}
    for entry in folders:
        key = pattern.search(entry).group(1)
        groups.setdefault(key, []).append(os.path.join(args.input_folder, entry))

    transfer = shutil.move if args.move else shutil.copy
    for key in sorted(groups):
        logging.info("Processing %s", key)
        destination = os.path.join(args.output_folder, key)
        Utilities.maybe_create_folder(destination)
        for source_folder in groups[key]:
            logging.log(8, "Processing %s", source_folder)
            for file_name in os.listdir(source_folder):
                transfer(os.path.join(source_folder, file_name),
                         os.path.join(destination, file_name))
    logging.info("Finished collapse")
if __name__ == "__main__":
    import argparse
    # Build the CLI; note the parser description text predates this script's
    # collapse behaviour and was likely copied from a sibling tool.
    parser = argparse.ArgumentParser("Convert model training format data to parquet format ")
    parser.add_argument("-input_folder", help="Folder where sub folders can be found")
    parser.add_argument("-rule", help="Regexp to group input folders")
    parser.add_argument("-output_folder", help="Destination folder where contets will be saved")
    parser.add_argument("--reentrant", help="Lenient, multiple-run mode", action="store_true")
    parser.add_argument("--exclude", help="Skip these folders", nargs="+")
    parser.add_argument("--move", help="Wether to move or copy files", action="store_true")
    parser.add_argument("-parsimony", help="Log parsimony level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything", default = "10")
    args = parser.parse_args()

    Logging.configure_logging(int(args.parsimony))

    run(args)
5000959 | <reponame>UChicagoSUPERgroup/analytic-password-cracking
""" This file contains classes used across different modules """
from enum import Enum
import os
from pyparsing import srange
class RunningStyle(Enum):
    """ An enum that denotes the run time style: John the Ripper (JTR) or Hashcat (HC). """
    JTR = 0
    HC = 1
class FatalRuntimeError(Exception):
    """Raised for unrecoverable errors that should stop the program."""

    def __init__(self, msg):
        super().__init__(msg)
class FilePath:
    """Wraps the absolute path of an existing file.

    Subscript keys:
        'addr'   -- the absolute path
        'prefix' -- the containing directory
        'name'   -- the file's base name
    """

    def __init__(self, a_path):
        # Reject anything that is not an existing regular file.
        if not os.path.isfile(a_path):
            raise FatalRuntimeError("Path Error: {}".format(a_path))
        abs_path = os.path.abspath(a_path)
        directory, base_name = os.path.split(abs_path)
        self.info = {'addr': abs_path, 'prefix': directory, 'name': base_name}

    def __getitem__(self, key):
        return self.info[key]
class PasswordPolicyConf:
    """Wraps a password-policy configuration.

    Attr:
        length: minimum required length, or -1 for no length requirement
        digit: whether a digit is required
        letter: whether a letter is required
        lower: whether a lowercase letter is required
        upper: whether an uppercase letter is required
    """

    def __init__(self,
                 length=-1,
                 digit=False,
                 letter=False,
                 lower=False,
                 upper=False):
        # A zero-length requirement is meaningless; None means "no requirement".
        if length == 0:
            raise Exception("Password Policy Invalid")
        self.length = -1 if length is None else length
        self.digit = digit
        self.letter = letter
        self.lower = lower
        self.upper = upper

    def to_arg_string(self):
        """ to string in arg format e.g. (--length=6 --digit --letter)"""
        flags = []
        if self.length >= 1:
            flags.append("--length=" + str(self.length))
        if self.digit:
            flags.append("--digit")
        if self.letter:
            flags.append("--letter")
        if self.upper:
            flags.append("--upper")
        if self.lower:
            flags.append("--lower")
        if not flags:
            return ""
        # The historical format carries a leading and trailing space.
        return " " + " ".join(flags) + " "

    def to_compact_string(self):
        """ to string in compact format e.g. (-length=6-digit-letter)"""
        parts = []
        if self.length >= 1:
            parts.append("-length=" + str(self.length))
        if self.digit:
            parts.append("-digit")
        if self.letter:
            parts.append("-letter")
        if self.upper:
            parts.append("-upper")
        if self.lower:
            parts.append("-lower")
        return "".join(parts)

    def to_rule_string(self, is_jtr):
        """ to string in rule format e.g. (>6 /?a /?d), JTR only """
        tokens = []
        if self.length >= 1:
            if is_jtr == True:
                # JtR's '>' is strictly greater, so emit length - 1; lengths
                # past 10 are encoded as letters starting at 'A'.
                if self.length <= 10:
                    tokens.append(">" + str(self.length - 1))
                else:
                    tokens.append(">" + chr(ord('A') + self.length - 11))
            else:
                # Hashcat: '<' means <= and '>' means >=, so emit the length
                # itself; lengths past 9 are encoded as letters from 'A'.
                if self.length <= 9:
                    tokens.append(">" + str(self.length))
                else:
                    tokens.append(">" + chr(ord('A') + self.length - 10))
        if self.digit:
            tokens.append("/?d")
        if self.letter:
            tokens.append("/?a")
        if self.upper:
            tokens.append("/?u")
        if self.lower:
            tokens.append("/?l")
        if not tokens:
            return ""
        return " " + " ".join(tokens) + " "

    def to_debug_string(self):
        """ to_arg_string, if empty return none """
        arg_str = self.to_arg_string()
        return "None" if arg_str == "" else arg_str
def arg_string_to_pw_policy(string):
    """Parse an arg-format policy string (e.g. "--length=6 --digit") into a
    PasswordPolicyConf instance.

    Unrecognized tokens are ignored; an empty or whitespace-only string
    yields the default (empty) policy.

    Bug fix: the original passed an undefined name ``symbols`` as a sixth
    constructor argument, which raised NameError on every non-empty input;
    PasswordPolicyConf takes exactly five arguments.
    """
    string = string.strip()
    if string == "":
        return PasswordPolicyConf()

    length = -1
    digit = False
    letter = False
    lower = False
    upper = False
    for val in string.split(" "):
        if "--digit" in val:
            digit = True
        elif "--letter" in val:
            letter = True
        elif "--lower" in val:
            lower = True
        elif "--upper" in val:
            upper = True
        elif val.startswith("--length="):
            length = int(val[len("--length="):])
    return PasswordPolicyConf(length, digit, letter, lower, upper)
# Used in AN Command
class Queue:
    """A minimal FIFO queue backed by a Python list.

    New items are inserted at index 0 and the oldest item is popped from the
    end, so the head of the queue is always the tail of the underlying list.
    """

    def __init__(self):
        self.items = []

    def empty(self):
        """Return True when the queue holds no items."""
        return len(self.items) == 0

    def enqueue(self, item):
        """Add item to the back of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the oldest queued item."""
        return self.items.pop()

    def size(self):
        """Return the number of queued items."""
        return len(self.items)
69473 | <reponame>aleksandromelo/Exercicios
# Lookup table mapping each number 0-10 to its Portuguese name.
cont = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez')
# Read a number from the user; values outside 0-10 (or non-integers) raise.
n = int(input('Digite um número entre 0 e 10: '))
print(f'Você digitou o número {cont[n]}.')
| StarcoderdataPython |
3263443 | import pandas as pd
import seaborn as sns
import matplotlib as plt
# Load the admissions-evolution CSV from a hard-coded local path.
df = pd.read_csv(r'C:\Users\<NAME>\evolucaoadmit.csv')
print('=================IMPRIMINDO GRAFICO==============', '\n',df)
# Take the first row (one state's record) for the summary statistics below.
estado = df.loc[0]
print(estado)
# NOTE(review): columns 3..10 are assumed to be the numeric monthly/yearly
# values for this row -- confirm against the CSV's actual layout.
media = int(estado.iloc[3:11].mean())
mediana= int(estado.iloc[3:11].median())
print(media)
print(mediana)
| StarcoderdataPython |
148669 | <gh_stars>0
from typing import List
class Node:
    """A node in an N-ary tree: a payload value plus its child nodes."""
    def __init__(self, val, children):
        self.val = val  # the node's value
        self.children = children  # list of child Node objects (may be None or empty)
class Solution:
    """N-ary tree postorder traversal (LeetCode 590).

    Given an N-ary tree, return the postorder traversal of its node values:
    for each node, all children are visited left to right before the node
    itself. Example: for the sample 3-ary tree the result is [5,6,3,2,4,1].
    """

    def postorder(self, root: 'Node') -> List[int]:
        """Return the postorder listing of values; [] for an empty tree.

        Bug fixes relative to the original:
        - a None root now returns [] instead of implicitly returning None;
        - a node whose ``children`` attribute is None no longer loses its
          own value (the original returned [] and dropped root.val).
        """
        if root is None:
            return []
        values: List[int] = []
        # `children` may legitimately be None on leaf nodes; treat it as empty.
        for child in root.children or []:
            values += self.postorder(child)
        values.append(root.val)
        return values
| StarcoderdataPython |
5168729 |
import socket, sys, argparse, subprocess, time, random
from multiprocessing.pool import ThreadPool
import torch
def get_open_port():
    """Ask the OS for a currently free TCP port and return its number.

    The probe socket is closed before returning, so another process could
    in principle grab the port before it is reused (inherent race).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        probe.bind(('', 0))  # port 0 => kernel picks any free port
        return probe.getsockname()[1]
def create_server(args, gpu_idx):
    '''Start a train_server.py subprocess on a free port.

    Returns (process, port): the Popen handle of the launched server and
    the port it was told to listen on.
    '''
    port = get_open_port()
    command = ['python', 'train_server.py']
    # Forward the relevant training options; every value is stringified.
    options = [
        ('--port', port),
        ('--model', args.model),
        ('--model-seed', args.model_seed),
        ('--domain', args.domain),
        ('--data-seed', args.data_seed),
        ('--batchsize', args.batchsize),
        ('--max-epoch', args.max_epoch),
        ('--patience', args.patience),
        ('--evaluation-set', args.evaluation_set),
        ('--gpu-idx', gpu_idx),
    ]
    for flag, value in options:
        command.append(flag)
        command.append(str(value))
    process = subprocess.Popen(command)
    return process, port
class TrainClient:
    """Blocking client for a single train_server worker.

    Wire protocol (must match train_server.py): UTF-8 text messages
    terminated by the literal sentinel '<EOS>'. A request is the order's
    items plus num_used, space-separated; the reply is a single float.
    """
    def __init__(self, remote_port):
        # Connect to a server previously launched on this host (create_server).
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((socket.gethostname(), remote_port))
        # Busy flag used by the scheduler to pick an idle client. It is not a
        # lock; callers must not issue concurrent queries on one client.
        self.in_use = False
    def query(self, order, num_used):
        # Send one (order, num_used) request and block for the float metric.
        assert not self.in_use, 'client currently in use?'
        self.in_use = True
        spec = ' '.join(map(str, order + [num_used])) + '<EOS>'
        self.sock.sendall(bytearray(spec, encoding='utf-8'))
        # Accumulate the reply in small chunks until the sentinel arrives.
        entire_data = ''
        while True:
            data = self.sock.recv(16)
            entire_data = entire_data + data.decode('utf-8')
            if entire_data.endswith('<EOS>'):
                break
        metric = float(entire_data.replace('<EOS>', ''))
        self.in_use = False
        return metric
class TrainScheduler:
    """Fans training queries out over a pool of train_server subprocesses.

    One server (and one TrainClient) is started per GPU per worker slot.
    Results are memoized in `self.cache`, keyed by the tuple prefix of the
    acquisition order a query corresponds to.
    """
    def __init__(self, args):
        self.args = args
        self.processes = []
        self.ports = []
        self.workers_per_gpu = args.workers_per_gpu
        # Either use every visible CUDA device or a user-supplied id list.
        if args.use_gpus == 'all':
            self.use_gpus = list(range(torch.cuda.device_count()))
        else:
            self.use_gpus = list(map(int, args.use_gpus.split(',')))
        # Launch workers_per_gpu servers on each GPU, staggered slightly.
        for gpu_idx in self.use_gpus * args.workers_per_gpu:
            process, port = create_server(args, gpu_idx)
            self.processes.append(process)
            self.ports.append(port)
            time.sleep(0.1)
        # Give the servers time to come up before connecting; RoBERTa needs
        # longer because it loads a large pre-trained checkpoint first.
        if args.model != 'roberta':
            time.sleep(10)
        elif args.model == 'roberta':
            time.sleep(20) # roberta server needs more time for loading pre-trained model
        self.clients = []
        for port in self.ports:
            self.clients.append(TrainClient(port))
        self.cache = dict()
    def get_free_client(self):
        # Spin (with random jitter) until some client is idle; return its index.
        # NOTE(review): this check-then-use of `in_use` is racy across threads;
        # the jittered sleeps make collisions unlikely but do not prevent them.
        time.sleep(random.random() * 0.05)
        in_uses = [c.in_use for c in self.clients]
        while sum(in_uses) == len(in_uses):
            print('waiting in use')
            time.sleep(random.random() * 0.05 + 0.02)
            in_uses = [c.in_use for c in self.clients]
        idx = in_uses.index(False)
        return idx
    def query(self, order, num_used):
        # Run one query on any idle client (blocking) and return its metric.
        idx = self.get_free_client()
        return self.clients[idx].query(order, num_used)
    def evaluate_order(self, order):
        # Return metrics for every prefix of `order` in steps of batchsize.
        # Cached prefixes are reused; only misses are dispatched, in parallel.
        num_used = list(range(0, self.args.tot_acq + 1, self.args.batchsize))
        results = [None] * len(num_used)
        query_args = []
        miss_idxs = []
        for i, idx in enumerate(num_used):
            cur_order = tuple(order[:idx])
            if cur_order in self.cache:
                results[i] = self.cache[cur_order]
            else:
                miss_idxs.append(i)
                query_args.append((order, idx))
        pool = ThreadPool(len(self.use_gpus) * self.workers_per_gpu)
        metrics = pool.starmap(self.query, query_args)
        # Fill in the misses and memoize them for future evaluate_order calls.
        for mi, m, q in zip(miss_idxs, metrics, query_args):
            assert results[mi] is None
            results[mi] = m
            cur_order = tuple(order[:q[1]])
            assert cur_order not in self.cache
            self.cache[cur_order] = m
        assert None not in results
        pool.close()
        pool.join()
        return results
    def __del__(self):
        # Best-effort cleanup: kill the spawned server subprocesses.
        for p in self.processes:
            p.kill()
| StarcoderdataPython |
140222 | from flask import Flask,render_template, request, redirect, url_for, abort, flash, session
import sqlalchemy
from werkzeug.security import generate_password_hash
from flask_login import login_manager, login_user, login_required,logout_user,current_user,LoginManager
from flask_user import roles_required
from datetime import datetime
from models.post_model import Posts
from models.user_model import UserModel
from models.comment_model import Comments
from models.category_model import Category
from models.tag_model import Tags
from models.roles import Role
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine,desc
# class ConfigClass(object):
# """ Flask application config """
# # Flask-Mail SMTP server settings
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_PORT = 465
# MAIL_USE_SSL = True
# MAIL_USE_TLS = False
# MAIL_USERNAME = '<EMAIL>'
# MAIL_PASSWORD = 'password'
# MAIL_DEFAULT_SENDER = '"MyApp" <<EMAIL>>'
# # Flask-User settings
# USER_APP_NAME = "Flask-User Basic App" # Shown in and email templates and page footers
# USER_ENABLE_EMAIL = True # Enable email authentication
# USER_ENABLE_USERNAME = False # Disable username authentication
# USER_EMAIL_SENDER_NAME = USER_APP_NAME
# USER_EMAIL_SENDER_EMAIL = "<EMAIL>"
app = Flask(__name__)
# app.config.from_object(__name__+'.ConfigClass')
# NOTE(review): hard-coded secret key and DB credentials; these should come
# from the environment/config in any real deployment.
app.config['SECRET_KEY'] = "Thisshouldbesecret!"
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:@localhost/flaskapp'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# A second, separate SQLAlchemy engine/session used directly by some routes.
engine = create_engine('mysql+pymysql://root:@localhost/flaskapp')
Session = sessionmaker(bind = engine)
sess = Session()
# Wire up Flask-Login for session-based authentication.
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: reload the user object from the id stored in the session.
    return UserModel.query.get(int(user_id))
# APP ROUTES
@app.route('/login', methods=["GET", "POST"])
def login():
    """Log a user in.

    GET renders the login form. POST validates the submitted credentials:
    on failure the user is flashed an error and redirected back to the
    form; on success they are logged in and sent to the home page.

    Bug fix: the original condition was
    ``if not user and user.check_password(password)``, which (a) raised
    AttributeError for unknown emails (calling a method on None thanks to
    short-circuiting) and (b) logged in any known email regardless of the
    password. The correct rejection condition is "no such user OR the
    password does not match".
    """
    if request.method == 'POST':
        email = request.form.get('email')
        password = request.form.get('password')
        user = UserModel.query.filter_by(email=email).first()
        if user is None or not user.check_password(password):
            flash('Email or Password Incorrect. Check and Try Again!')
            return redirect(url_for('login'))
        login_user(user)
        return redirect(url_for('home'))
    return render_template('login.html')
@app.route('/logout')
def logout():
    """Log the current user out and clear their session."""
    user = current_user
    # NOTE(review): `authenticated` is not among the UserModel attributes
    # visible here -- confirm the model defines it, otherwise this write is
    # a no-op column-wise.
    user.authenticated = False
    # NOTE(review): `db` is only imported inside the __main__ guard below;
    # verify this route is only reachable when the module is run as a script.
    db.session.add(user)
    db.session.commit()
    logout_user()
    session.clear()
    return redirect(url_for('index'))
@app.route('/signup',methods=['GET','POST'])
def signup():
    """Register a new account.

    GET renders the sign-up form. POST creates a non-admin user with a
    sha256 password hash, unless the email is already taken (in which case
    the user is flashed an error and sent back to the form).
    """
    if request.method == 'POST':
        email = request.form['email']
        # Reject duplicate registrations by email.
        if UserModel.query.filter_by(email=email).first():
            flash('User with that email already exists.')
            return redirect(url_for('signup'))
        hashed = generate_password_hash(request.form['passwordHash'], method='sha256')
        account = UserModel(request.form['firstName'],
                            request.form['lastName'],
                            email,
                            hashed,
                            datetime.now(),
                            False)
        account.save_to_db()
        return redirect(url_for('login'))
    return render_template('sign_up.html')
@app.route('/')
def index():
    """Public landing page.

    Seeds the database with a default admin account (plus 'Admin' and
    'Agent' roles) the first time it is missing, then lists all posts
    newest-first. Logged-in non-admin users are redirected to /home.
    """
    # Bootstrap a default administrator if it does not exist yet.
    if not UserModel.query.filter(UserModel.email == '<EMAIL>').first():
        passwordHash = generate_password_hash('Admin',method='sha256')
        user = UserModel(
            firstName='Admin',
            lastName='Chamakuvangu',
            email='<EMAIL>',
            passwordHash=passwordHash,
            registeredAt=datetime.now(),
            is_admin=True
        )
        user.roles.append(Role(name='Admin'))
        user.roles.append(Role(name='Agent'))
        # NOTE(review): `db` is only imported inside the __main__ guard below;
        # if this module is imported by a WSGI server this name is unresolved
        # -- confirm how the app is launched.
        db.session.add(user)
        db.session.commit()
    posts = Posts.query.order_by(desc(Posts.publishedAt)).all()
    categories = Category.query.all()
    # users = UserModel.query.all()
    if current_user.is_authenticated and current_user.is_admin==False:
        return redirect(url_for('home'))
    return render_template('index.html', posts = posts,categories = categories)
@app.route('/home')
@login_required
def home():
    """Render the logged-in landing page: all posts newest-first, plus the
    category list and the current user."""
    recent_posts = Posts.query.order_by(desc(Posts.publishedAt)).all()
    all_categories = Category.query.all()
    return render_template('home.html',
                           posts=recent_posts,
                           categories=all_categories,
                           user=current_user)
@app.route('/category/<string:category>')
def category(category):
    """List the posts belonging to the named category; 404 if it is unknown."""
    try:
        matched = Category.query.filter_by(category=category).one()
        matching_posts = Posts.query.filter_by(category_id=matched.id).all()
        return render_template('category.html', posts=matching_posts, user=current_user)
    except sqlalchemy.orm.exc.NoResultFound:
        abort(404)
@app.route('/tags/<string:tag>')
def tag(tag):
    """List the posts carrying the named tag; 404 if the tag is unknown."""
    try:
        matched = Tags.query.filter_by(tag=tag).one()
        matching_posts = Posts.query.filter_by(tag_id=matched.id).all()
        return render_template('category.html', posts=matching_posts, user=current_user)
    except sqlalchemy.orm.exc.NoResultFound:
        abort(404)
@app.route('/contact')
def contact():
    """Render the contact page for the current (possibly anonymous) user."""
    return render_template('contact.html', user=current_user)
@app.route('/post/<string:slug>')
def post(slug):
    """Show a single post (public view) with its tags and comments
    (newest comment first). Unknown slugs produce a 404."""
    try:
        entry = Posts.query.filter_by(slug=slug).one()
        tag_list = Tags.query.filter_by(id=entry.tag_id).all()
        comment_list = Comments.query.filter_by(post_id=entry.id).order_by(desc(Comments.publishedOn)).all()
        all_users = UserModel.query.all()
        # The template distinguishes posts without comments.
        if not comment_list:
            return render_template('post.html', post=entry, users=all_users, tags=tag_list)
        return render_template('post.html', post=entry, comments=comment_list, users=all_users, tags=tag_list)
    except sqlalchemy.orm.exc.NoResultFound:
        abort(404)
@app.route('/posts/<string:slug>',methods=['GET','POST'])
@login_required
def posts(slug):
    """Show a single post to a logged-in user; POST attaches a new comment.

    Bug fix: the original performed the post lookup in the POST branch
    *outside* any try/except, so an unknown slug raised an unhandled
    NoResultFound (a 500) instead of a 404, and the comment was created
    before the post's existence was established. The lookup now happens
    exactly once, with the 404 handling, and is reused for both branches.
    """
    try:
        entry = Posts.query.filter_by(slug=slug).one()
    except sqlalchemy.orm.exc.NoResultFound:
        abort(404)
    if request.method == 'POST':
        new_comment = Comments(request.form['comment'],
                               datetime.now(),
                               entry.id,
                               current_user.id)
        new_comment.save_to_db()
    tags = Tags.query.filter_by(id=entry.tag_id).all()
    comments = Comments.query.filter_by(post_id=entry.id).all()
    users = UserModel.query.all()
    if comments:
        return render_template('posts.html', post=entry, comments=comments, tags=tags, users=users)
    return render_template('posts.html', post=entry, users=users, tags=tags)
@app.route('/admin')
@roles_required('Admin')
def admin_page():
    # NOTE(review): redirects to the *relative* URL 'admin' from a view that
    # is itself mounted at /admin; confirm this resolves to Flask-Admin's
    # /admin/ index rather than looping back to this route.
    return redirect('admin')
@app.route('/about')
def about():
    """Render the about page for the current (possibly anonymous) user."""
    return render_template('about.html', user=current_user)
if __name__ == '__main__':
    # Flask-Admin and the shared SQLAlchemy handle are only needed (and
    # therefore only imported) when this module runs as a script.
    from flask_admin import Admin
    from flask_admin.contrib.sqla import ModelView
    from db import db
    db.init_app(app)
    class Controller(ModelView):
        # Restrict every admin model view to authenticated admin users;
        # authenticated non-admins get a 404.
        def is_accessible(self):
            if current_user.is_authenticated:
                if current_user.is_admin == True:
                    return current_user.is_authenticated
                else:
                    return abort(404)
        # NOTE(review): Flask-Admin's redirect hook is `inaccessible_callback`;
        # confirm `not_auth` is actually invoked anywhere.
        def not_auth(self):
            return redirect(url_for('login'))
    admin = Admin(app,name='Control Panel')
    admin.add_view(Controller(Posts, db.session))
    admin.add_view(Controller(Category, db.session))
    admin.add_view(Controller(Comments, db.session))
    admin.add_view(Controller(Tags,db.session))
    admin.add_view(Controller(UserModel, db.session))
    app.run(debug=True, port='5000') #host='0.0.0.0'
12827276 | <reponame>whamcloud/iml-agent
from mock import patch
import unittest
from chroma_agent.plugin_manager import DevicePluginManager, ActionPluginManager
from chroma_agent.lib.agent_teardown_functions import agent_daemon_teardown_functions
from chroma_agent.lib.agent_startup_functions import agent_daemon_startup_functions
from chroma_agent.action_plugins.device_plugin import (
initialise_block_device_drivers,
terminate_block_device_drivers,
)
class TestDevicePlugins(unittest.TestCase):
    """Tests for device-plugin discovery/exclusion and for the registration
    of the block-device driver start/stop hooks."""

    def test_get_device_plugins(self):
        """The manager should discover at least one plugin class."""
        self.assertGreater(len(DevicePluginManager.get_plugins()), 0)

    def test_excluded_plugins(self):
        """'linux' must vanish from the plugin map when excluded."""
        self.assertIn("linux", DevicePluginManager.get_plugins())

        # Reset the cached plugin map so the exclusion list is re-applied.
        with patch("chroma_agent.plugin_manager.EXCLUDED_PLUGINS", ["linux"]):
            with patch.object(DevicePluginManager, "_plugins", {}):
                self.assertNotIn("linux", DevicePluginManager.get_plugins())

    def test_initialise_block_device_drivers_called_at_startup(self):
        """initialise_block_device_drivers is registered as a startup hook."""
        self.assertIn(initialise_block_device_drivers,
                      agent_daemon_startup_functions)

    def test_terminate_block_device_drivers_called_at_teardown(self):
        """terminate_block_device_drivers is registered as a teardown hook."""
        self.assertIn(terminate_block_device_drivers,
                      agent_daemon_teardown_functions)
class TestActionPlugins(unittest.TestCase):
    """Tests for action-plugin discovery."""

    def test_get_action_plugins(self):
        """Test that we get a list of loaded plugin classes."""
        commands = ActionPluginManager().commands
        self.assertNotEqual(len(commands), 0)
| StarcoderdataPython |
8070366 | <reponame>GetPastTheMonkey/advent-of-code<filename>aoc2018/day10/day10_part2.py<gh_stars>1-10
print("This is actually really simple:")
print("\t- Run part 1 of this day")
print("\t- Check the images folder")
print("\t- Look for the image where you can read the string")
print("\t- Check the number in the filename, this is your answer")
| StarcoderdataPython |
9760806 | from collections import defaultdict
from itertools import repeat
from typing import TypeVar, Iterable
from zpy.classes.bases.tree import Forest, Tree
from zpy.classes.collections.array import Array
from zpy.classes.logical.maybe import Maybe, Nothing, Just
T = TypeVar("T")
class UnionFind(Forest[T]):
    """Disjoint-set forest (union-find) over the integers 0..n-1.

    Uses union by rank.  ``_uf[i]`` holds the parent index of ``i``, or the
    negated component size when ``i`` is a root.  Each current root can be
    viewed as a read-only ``Tree`` via :meth:`trees`.
    """
    class _UnionFindTree(Tree[T]):
        # Read-only Tree view of one component; node "values" are simply
        # the integer indices themselves (see __getitem__).
        def root(self) -> Maybe[T]:
            # Representative element of this component.
            return Just(self._root_idx)
        def parent(self, idx: int) -> Maybe[T]:
            # Nothing() when idx is the component root (negative _uf entry).
            parent_idx = self.uf._uf[idx]
            if parent_idx < 0:
                return Nothing()
            return Just(parent_idx)
        def children(self, idx: int) -> Array[T]:
            return Array(self.uf.children(idx))
        def root_idx(self) -> int:
            return self._root_idx
        def parent_idx(self, idx: int) -> int:
            # Raising variant of parent().
            parent_idx = self.uf._uf[idx]
            if parent_idx < 0:
                raise IndexError("No parent")
            return parent_idx
        def children_idx(self, idx: int) -> Array[int]:
            return self.children(idx)
        def get(self, idx: int) -> Maybe[T]:
            # Valid node indices are exactly 0..n-1.
            if 0 <= idx < self.uf.n:
                return Just(idx)
            return Nothing()
        def __getitem__(self, idx: int) -> T:
            # The stored "value" at a node is its own index.
            return idx
        def __init__(self, uf: "UnionFind[T]", root: int):
            self._root_idx = root
            self.uf = uf
    def __init__(self, n):
        """Create n singleton components, one per index in 0..n-1."""
        self.n = n
        self._uf = Array(repeat(-1, times=self.n))
        self._ranks = Array(repeat(1, times=self.n))
        self._children = Array([set() for _ in range(self.n)])
        self.roots = set(range(self.n))
    def root(self, x):
        """Return the representative of x's component (recursive walk)."""
        if self._uf[x] < 0:
            return x
        return self.root(self._uf[x])
    def parent(self, x):
        # A root is its own parent.
        if self._uf[x] < 0:
            return x
        return self._uf[x]
    def children(self, x):
        # Defensive copy so callers cannot mutate internal state.
        return set(self._children[x])
    def flatten(self):
        """Path-compress every node to point directly at its root,
        updating the children sets to match."""
        for idx in range(self.n):
            root = self.root(idx)
            if root != idx:
                parent = self.parent(idx)
                self._uf[idx] = root
                self._children[root].add(idx)
                self._children[parent].remove(idx)
    def rank(self, x):
        """Rank (tree-height bound) of x's component."""
        return self._ranks[self.root(x)]
    def size(self, x):
        # Root entries store the negated component size.
        return -self._uf[self.root(x)]
    def same(self, x, y):
        """True iff x and y are in the same component."""
        return self.root(x) == self.root(y)
    def unite(self, x, y):
        """Merge the components of x and y (union by rank); no-op if equal."""
        x, y = map(self.root, (x, y))
        if x == y:
            return
        rank_x, rank_y = map(self.rank, (x, y))
        if rank_x < rank_y:
            # Attach the lower-ranked root under the higher-ranked one.
            x, y = y, x
            rank_x, rank_y = rank_y, rank_x
        self._uf[x] += self._uf[y]
        self._uf[y] = x
        self.roots.remove(y)
        self._children[x].add(y)
        if rank_x == rank_y:
            self._ranks[x] += 1
    def trees(self) -> Iterable[Tree[T]]:
        """One Tree view per current component root."""
        return Array(map(lambda r: self._UnionFindTree(self, r), self.roots))
    def __repr__(self):
        cls = type(self)
        return f"{cls.__name__}({self.n})"
| StarcoderdataPython |
1758885 | #!/usr/bin/python
#
# This tools exploits the data of csv files produced by script collect-ce-job-status.py, to
# compute the running ratio R/(R+W) as a function of time
#
# Results are stored in file running_ratio.csv.
import os
import csv
import globvars
# -------------------------------------------------------------------------
# Compute the running ratio R/(R+W) as a function of time
# Input:
# dataFiles: list of tuples: (fileName, datetime, date, hour, rows, sum_VO_Waiting, sum_VO_Running)
# where
# - datetime is formated as "YYYY-MM-DD HH:MM:SS"
# - date is only the date part YYYY:MM:DD, and hour is only the hour HH (used for filtering data in excel file)
# - rows is a dictionnary wich keys are the hostnames and values are another dictionnary with the following keys:
# 'Site'
# 'ImplName', 'ImplVer'
# 'CE_Total', 'VO_Total'
# 'CE_Running', 'VO_Running'
# 'CE_Waiting', 'VO_Waiting'
# 'CE_Running', 'VO_Running'
# 'CE_FreeSlots', 'VO_FreeSlots'
# 'CE_MaxTotal', 'VO_MaxTotal'
# 'CE_MaxWaiting', 'VO_MaxWaiting'
# 'CE_MaxRunning', 'VO_MaxRunning'
# 'CE_WRT', 'VO_WRT'
# 'CE_MaxTotal', 'VO_MaxTotal'
# 'CE_ERT', 'VO_ERT'
# 'CE_Status'
# -------------------------------------------------------------------------
def process(dataFiles):
# Global variables
DECIMAL_MARK = globvars.DECIMAL_MARK
DEBUG = globvars.DEBUG
OUTPUT_DIR = globvars.OUTPUT_DIR
print "Computing the mean ratio R/(R+W) as a function of time..."
outputFile = globvars.OUTPUT_DIR + os.sep + "running_ratio.csv"
outputf = open(outputFile, 'wb')
writer = csv.writer(outputf, delimiter=';')
writer.writerow(["# Date time", "Waiting", "Running", "R/(R+W)"])
# Loop on all data files that were acquired
for (fileName, datetime, date, hour, rows, sum_VO_Waiting, sum_VO_Running) in dataFiles:
R = float(sum_VO_Running)
W = float(sum_VO_Waiting)
if R+W > 0:
writer.writerow([datetime, sum_VO_Waiting, sum_VO_Running, str(round(R/(R+W), 4)).replace('.', globvars.DECIMAL_MARK) ])
outputf.close()
| StarcoderdataPython |
9718355 | <reponame>surroundaustralia/Prez
from typing import Dict, Optional, Union
from fastapi.responses import Response, JSONResponse, PlainTextResponse
from connegp import RDF_MEDIATYPES, MEDIATYPE_NAMES
from renderers import ListRenderer
from config import *
from profiles.vocprez_profiles import dcat, dd
from models.vocprez import VocPrezCollectionList
from utils import templates
class VocPrezCollectionListRenderer(ListRenderer):
    """Renders a paged list of VocPrez collections in the ``dcat`` or ``dd``
    profile, negotiating HTML / JSON / RDF mediatypes."""
    # Supported profiles for this renderer; "dcat" is the default.
    profiles = {"dcat": dcat, "dd": dd}
    default_profile_token = "dcat"
    def __init__(
        self,
        request: object,
        instance_uri: str,
        label: str,
        comment: str,
        collection_list: VocPrezCollectionList,
        page: int,
        per_page: int,
        member_count: int
    ) -> None:
        """Delegate list/paging state to ListRenderer and keep the raw
        collection list for the ``dd`` profile."""
        super().__init__(
            request,
            VocPrezCollectionListRenderer.profiles,
            VocPrezCollectionListRenderer.default_profile_token,
            instance_uri,
            collection_list.members,
            label,
            comment,
            page,
            per_page,
            member_count
        )
        self.collection_list = collection_list
    def _render_dcat_html(self, template_context: Union[Dict, None]):
        """Renders the HTML representation of the DCAT profile for a dataset"""
        _template_context = {
            "request": self.request,
            "members": self.members,
            "uri": self.instance_uri,
            # self.pages is presumably computed by ListRenderer from
            # page/per_page/member_count -- TODO confirm in base class.
            "pages": self.pages,
            "label": self.label,
            "comment": self.comment,
            "profiles": self.profiles,
            "default_profile": self.default_profile_token,
            "mediatype_names": MEDIATYPE_NAMES
        }
        # Caller-supplied context entries override the defaults above.
        if template_context is not None:
            _template_context.update(template_context)
        return templates.TemplateResponse(
            "vocprez/vocprez_collections.html", context=_template_context, headers=self.headers
        )
    def _render_dcat_json(self):
        """JSON representation of the DCAT profile (uri, members, labels)."""
        return JSONResponse(
            content={
                "uri": self.instance_uri,
                "members": self.members,
                "label": self.label,
                "comment": self.comment,
            },
            media_type="application/json",
            headers=self.headers,
        )
    def _render_dcat_rdf(self):
        # NOTE(review): placeholder body -- real RDF serialisation not yet
        # implemented.
        return Response(content="test DCAT RDF")
    def _render_dcat(self, template_context: Union[Dict, None]):
        """Dispatch the DCAT profile by negotiated mediatype."""
        if self.mediatype == "text/html":
            return self._render_dcat_html(template_context)
        elif self.mediatype in RDF_MEDIATYPES:
            return self._render_dcat_rdf()
        else:  # application/json
            return self._render_dcat_json()
    def _render_dd_json(self) -> JSONResponse:
        """Renders the json representation of the dd profile for a list of collections"""
        return JSONResponse(
            content=self.collection_list.get_collection_flat_list(),
            media_type="application/json",
            headers=self.headers,
        )
    def _render_dd(self):
        """Renders the dd profile for a list of collections"""
        return self._render_dd_json()
    def render(
        self, template_context: Optional[Dict] = None
    ) -> Union[
        PlainTextResponse, templates.TemplateResponse, Response, JSONResponse, None
    ]:
        """Entry point: dispatch by negotiated profile; 400 on negotiation
        error, None for an unknown profile."""
        if self.error is not None:
            return PlainTextResponse(self.error, status_code=400)
        elif self.profile == "mem":
            return self._render_mem(template_context)
        elif self.profile == "alt":
            return self._render_alt(template_context)
        elif self.profile == "dcat":
            return self._render_dcat(template_context)
        elif self.profile == "dd":
            return self._render_dd()
        else:
            return None
| StarcoderdataPython |
5036139 | '''
Copyright 2017 <NAME>
Changes authored by <NAME>:
Copyright 2018 The Johns Hopkins University Applied Physics Laboratory LLC.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import atexit
import logging
try:
import RPi.GPIO as GPIO
except (ImportError, RuntimeError):
from . import mockGPIO as GPIO
from .utils import do_callback
# Pin assignments use physical (BOARD) numbering -- see GPIO.setmode below.
_GPIO_MODE = GPIO.BOARD
_CONNECTION_STATUS_RED_PIN = 22
_CONNECTION_STATUS_GREEN_PIN = 24
_CONNECTION_STATUS_BLUE_PIN = 26
_MESSAGE_PENDING_PIN = 16
_RING_INDICATOR_PIN = 12
_RING_INDICATOR_PUD = GPIO.PUD_DOWN
# Connection-status LED colours; YELLOW is produced by driving the red and
# green channels together (see HolonetGPIO.set_led_connection_status).
RED = 1
YELLOW = 2
GREEN = 3
BLUE = 4
_logger = logging.getLogger('holonet.holonetGPIO')
class HolonetGPIOProtocol(object):  # pylint: disable=too-few-public-methods
    """Callback interface for HolonetGPIO events; subclass and override."""
    def holonetGPIORingIndicatorChanged(self, status):
        # Called with the new boolean level of the ring-indicator pin.
        pass
class HolonetGPIO(object):
    """Drives the Holonet status LEDs and watches the modem ring-indicator
    input pin.

    ``callback`` is invoked (via utils.do_callback) with
    ``holonetGPIORingIndicatorChanged(status)`` whenever the ring-indicator
    pin changes level.
    """

    def __init__(self, callback):
        self.callback = callback
        # Release the GPIO pins however the process exits.
        atexit.register(_cleanup)
        GPIO.setmode(_GPIO_MODE)
        GPIO.setup(_RING_INDICATOR_PIN, GPIO.IN,
                   pull_up_down=_RING_INDICATOR_PUD)
        GPIO.setup(_CONNECTION_STATUS_RED_PIN, GPIO.OUT)
        GPIO.setup(_CONNECTION_STATUS_GREEN_PIN, GPIO.OUT)
        GPIO.setup(_CONNECTION_STATUS_BLUE_PIN, GPIO.OUT)
        GPIO.setup(_MESSAGE_PENDING_PIN, GPIO.OUT)
        GPIO.add_event_detect(_RING_INDICATOR_PIN, GPIO.BOTH,
                              callback=self._ring_indicator_callback)

    @staticmethod
    def set_led_connection_status(status):
        """Show RED, YELLOW, GREEN or BLUE on the RGB connection LED."""
        _logger.debug('Connection status LED: %s', status)
        # Membership tests instead of chained == comparisons; YELLOW lights
        # the red and green channels together.
        r = _boolToGPIO(status in (RED, YELLOW))
        g = _boolToGPIO(status in (YELLOW, GREEN))
        b = _boolToGPIO(status == BLUE)
        GPIO.output(_CONNECTION_STATUS_RED_PIN, r)
        GPIO.output(_CONNECTION_STATUS_GREEN_PIN, g)
        GPIO.output(_CONNECTION_STATUS_BLUE_PIN, b)

    @staticmethod
    def set_led_message_pending(status):
        """Turn the message-pending LED on (truthy) or off (falsy)."""
        _logger.debug('Message pending LED: %s', status)
        val = _boolToGPIO(status)
        GPIO.output(_MESSAGE_PENDING_PIN, val)

    def _ring_indicator_callback(self, _channel):
        # Edge-detect handler: read the current level and forward it.
        status = bool(GPIO.input(_RING_INDICATOR_PIN))
        self._do_callback(HolonetGPIOProtocol.holonetGPIORingIndicatorChanged,
                          status)

    def _do_callback(self, f, *args):
        do_callback(self.callback, f, *args)
def _boolToGPIO(v):
    """Map a truthy value to GPIO.HIGH and a falsy one to GPIO.LOW."""
    if v:
        return GPIO.HIGH
    return GPIO.LOW
def _cleanup():
    # atexit hook registered by HolonetGPIO.__init__: return all GPIO pins
    # to their default state on interpreter exit.
    GPIO.cleanup()
| StarcoderdataPython |
18062 | <gh_stars>1-10
from django.conf.urls import patterns, url
from main import views
# URL routes for the ``main`` app.
# NOTE(review): ``patterns()`` was deprecated in Django 1.8 and removed in
# 1.10 -- confirm the project's Django version before upgrading; the modern
# form is a plain list of url()/path() entries.
urlpatterns = patterns('',
    url(r'^$', views.inicio, name='inicio'),
    url(r'^acerca/', views.acerca, name='acerca'),
    url(r'^contacto/', views.contacto, name='contacto'),
    url(r'^autenticar/', views.autenticar, name='autenticar'),
    url(r'^cerrar_sesion/', views.cerrar_sesion, name='cerrar_sesion'),
    url(r'^tiempo/', views.tiempo, name='tiempo'),
    # Per-user profile page, keyed by numeric user id.
    url(r'^perfil/(?P<usuario>\d+)/$', views.perfil, name='perfil'),
    url(r'^imprimir_ajuste/', views.imprimir_ajuste, name='imprimir_ajuste'),
    url(r'^imprimir_ajusteId/(?P<ajusteEstudianteId>\d+)/$', views.imprimir_ajusteId,
        name='imprimir_ajusteId'),
    url(r'^imprimir_expediente/', views.imprimir_expediente, name='imprimir_expediente'),
    url(r'^imprimir_expedienteId/(?P<expedienteEstudianteId>\d+)/$', views.imprimir_expedienteId,
        name='imprimir_expedienteId'),
)
4984485 | <gh_stars>10-100
import itertools
from string import Template
import numpy as np
from PuzzleLib.Cuda.Utils import roundUpDiv
upsampleNearestTmpl = Template("""
extern "C"
__global__ void upsample2dNearest(float *outdata, const float *indata, int inh, int inw, int outh, int outw,
int hscale, int wscale)
{
__shared__ float shdata[$hBlockSize][$wBlockSize];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
if (y >= inh || x >= inw) return;
shdata[threadIdx.y][threadIdx.x] = indata[z * inh * inw + y * inw + x];
__syncthreads();
for (int i = 0; i < hscale; i++)
for (int j = 0; j < wscale; j++)
{
int outidx = z * outh * outw + (y * hscale + i) * outw + (x * wscale + j);
outdata[outidx] = shdata[threadIdx.y][threadIdx.x];
}
}
extern "C"
__global__ void upsample2dNearestBackward(float *ingrad, const float *outgrad, int inw, int outw,
int hscale, int wscale, int insize)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= insize) return;
int x = (idx % inw) * wscale;
int y = (idx / inw) * hscale;
float acc = 0.0f;
for (int i = 0; i < wscale; i++)
for (int j = 0; j < hscale; j++)
acc += outgrad[(y + j) * outw + x + i];
ingrad[idx] = acc;
}
extern "C"
__global__ void upsample3dNearest(float *outdata, const float *indata, int ind, int inh, int inw,
int outd, int outh, int outw, int dscale, int hscale, int wscale)
{
__shared__ float shdata[$hBlockSize][$wBlockSize];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
if (y >= inh || x >= inw) return;
shdata[threadIdx.y][threadIdx.x] = indata[z * inh * inw + y * inw + x];
__syncthreads();
for (int i = 0; i < dscale; i++)
for (int j = 0; j < hscale; j++)
for (int k = 0; k < wscale; k++)
{
int outidx = (z * dscale + i) * outh * outw + (y * hscale + j) * outw + (x * wscale + k);
outdata[outidx] = shdata[threadIdx.y][threadIdx.x];
}
}
extern "C"
__global__ void upsample3dNearestBackward(float *ingrad, const float *outgrad, int inh, int inw, int outh, int outw,
int dscale, int hscale, int wscale, int insize)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= insize) return;
int x = (idx % inw) * wscale;
int y = ((idx % (inh * inw)) / inw) * hscale;
int z = idx / (inh * inw) * dscale;
float acc = 0.0f;
for (int i = 0; i < dscale; i++)
for (int j = 0; j < hscale; j++)
for (int k = 0; k < wscale; k++)
acc += outgrad[(z + i) * outh * outw + (y + j) * outw + x + k];
ingrad[idx] = acc;
}
""")
# CUDA kernels for (bi/tri)linear upsampling and their atomic-add backward
# passes.  No placeholders are substituted; Template is kept for symmetry
# with upsampleNearestTmpl.
# Fix: in upsample3dLinear, one interpolation term indexed the depth slice
# with `d1 * inw * inw` instead of `d1 * inh * inw` -- wrong whenever
# inh != inw (the unit test uses inh == inw == 2 and could not catch it).
upsampleLinearTmpl = Template("""
extern "C"
__global__ void upsample2dLinear(float *outdata, const float *indata, int batchsize, int maps, int inh, int inw,
								 int outh, int outw, float rh, float rw)
{
	int outx = blockIdx.x * blockDim.x + threadIdx.x;
	int outy = blockIdx.y * blockDim.y + threadIdx.y;
	if (outx >= outw || outy >= outh) return;
	float h1r = rh * outy;
	int h1 = h1r;
	int h1p = (h1 < inh - 1) ? 1 : 0;
	float dh1 = h1r - h1;
	float dh0 = 1.0f - dh1;
	float w1r = rw * outx;
	int w1 = w1r;
	int w1p = (w1 < inw - 1) ? 1 : 0;
	float dw1 = w1r - w1;
	float dw0 = 1.0f - dw1;
	for (int b = 0; b < batchsize ; b++)
	{
		int obstride = b * maps * outh * outw;
		int ibstride = b * maps * inh * inw;
		for (int c = 0; c < maps; c++)
		{
			int ocstride = c * outh * outw;
			int icstride = c * inh * inw;
			float val = dh0 * (dw0 * indata[ibstride + icstride + h1 * inw + w1] +
					  dw1 * indata[ibstride + icstride + h1 * inw + w1 + w1p]) +
					  dh1 * (dw0 * indata[ibstride + icstride + (h1 + h1p) * inw + w1] +
					  dw1 * indata[ibstride + icstride + (h1 + h1p) * inw + w1 + w1p]);
			outdata[obstride + ocstride + outy * outw + outx] = val;
		}
	}
}
extern "C"
__global__ void upsample2dLinearBackward(float *ingrad, const float *outgrad, int batchsize, int maps,
										 int inh, int inw, int outh, int outw, float rh, float rw)
{
	int outx = blockIdx.x * blockDim.x + threadIdx.x;
	int outy = blockIdx.y * blockDim.y + threadIdx.y;
	if (outx >= outw || outy >= outh) return;
	float h1r = rh * outy;
	int h1 = h1r;
	int h1p = (h1 < inh - 1) ? 1 : 0;
	float dh1 = h1r - h1;
	float dh0 = 1.0f - dh1;
	float w1r = rw * outx;
	int w1 = w1r;
	int w1p = (w1 < inw - 1) ? 1 : 0;
	float dw1 = w1r - w1;
	float dw0 = 1.0f - dw1;
	for (int b = 0; b < batchsize; b++)
	{
		int obstride = b * maps * outh * outw;
		int ibstride = b * maps * inh * inw;
		for (int c = 0; c < maps; c++)
		{
			int ocstride = c * outh * outw;
			int icstride = c * inh * inw;
			float val = outgrad[obstride + ocstride + outy * outw + outx];
			atomicAdd(&ingrad[ibstride + icstride + h1 * inw + w1], dh0 * dw0 * val);
			atomicAdd(&ingrad[ibstride + icstride + h1 * inw + w1 + w1p], dh0 * dw1 * val);
			atomicAdd(&ingrad[ibstride + icstride + (h1 + h1p) * inw + w1], dh1 * dw0 * val);
			atomicAdd(&ingrad[ibstride + icstride + (h1 + h1p) * inw + w1 + w1p], dh1 * dw1 * val);
		}
	}
}
extern "C"
__global__ void upsample3dLinear(float *outdata, const float *indata, int batchsize, int maps,
								 int ind, int inh, int inw, int outd, int outh, int outw, float rd, float rh, float rw)
{
	int outx = blockIdx.x * blockDim.x + threadIdx.x;
	int outy = blockIdx.y * blockDim.y + threadIdx.y;
	int outz = blockIdx.z;
	if (outx >= outw || outy >= outh) return;
	float d1r = rd * outz;
	int d1 = d1r;
	int d1p = (d1 < ind - 1) ? 1 : 0;
	float dd1 = d1r - d1;
	float dd0 = 1.0f - dd1;
	float h1r = rh * outy;
	int h1 = h1r;
	int h1p = (h1 < inh - 1) ? 1 : 0;
	float dh1 = h1r - h1;
	float dh0 = 1.0f - dh1;
	float w1r = rw * outx;
	int w1 = w1r;
	int w1p = (w1 < inw - 1) ? 1 : 0;
	float dw1 = w1r - w1;
	float dw0 = 1.0f - dw1;
	for (int b = 0; b < batchsize; b++)
	{
		int obstride = b * maps * outd * outh * outw;
		int ibstride = b * maps * ind * inh * inw;
		for (int c = 0; c < maps; c++)
		{
			int ocstride = c * outd * outh * outw;
			int icstride = c * ind * inh * inw;
			float val =
			dd0 * (dh0 * (dw0 * indata[ibstride + icstride + d1 * inh *inw + h1 * inw + w1] +
			dw1 * indata[ibstride + icstride + d1 * inh *inw + h1 * inw + w1 + w1p]) +
			dh1 * (dw0 * indata[ibstride + icstride + d1 * inh *inw + (h1 + h1p) * inw + w1] +
			dw1 * indata[ibstride + icstride + d1 * inh *inw + (h1 + h1p) * inw + w1 + w1p])) +
			dd1 * (dh0 * (dw0 * indata[ibstride + icstride + (d1 + d1p) * inh * inw + h1 * inw + w1] +
			dw1 * indata[ibstride + icstride + (d1 + d1p) * inh * inw + h1 * inw + w1 + w1p]) +
			dh1 * (dw0 * indata[ibstride + icstride + (d1 + d1p) * inh * inw + (h1 + h1p) * inw + w1] +
			dw1 * indata[ibstride + icstride + (d1 + d1p) * inh * inw + (h1 + h1p) * inw + w1 + w1p]));
			outdata[obstride + ocstride + outz * outh * outw + outy * outw + outx] = val;
		}
	}
}
extern "C"
__global__ void upsample3dLinearBackward(float *ingrad, const float *outgrad, int batchsize, int maps,
										 int ind, int inh, int inw, int outd, int outh, int outw,
										 float rd, float rh, float rw)
{
	int outx = blockIdx.x * blockDim.x + threadIdx.x;
	int outy = blockIdx.y * blockDim.y + threadIdx.y;
	int outz = blockIdx.z;
	if (outx >= outw || outy >= outh) return;
	float d1r = rd * outz;
	int d1 = d1r;
	int d1p = (d1 < ind - 1) ? 1 : 0;
	float dd1 = d1r - d1;
	float dd0 = 1.0f - dd1;
	float h1r = rh * outy;
	int h1 = h1r;
	int h1p = (h1 < inh - 1) ? 1 : 0;
	float dh1 = h1r - h1;
	float dh0 = 1.0f - dh1;
	float w1r = rw * outx;
	int w1 = w1r;
	int w1p = (w1 < inw - 1) ? 1 : 0;
	float dw1 = w1r - w1;
	float dw0 = 1.0f - dw1;
	for (int b = 0; b < batchsize; b++)
	{
		int obstride = b * maps * outd * outh * outw;
		int ibstride = b * maps * ind * inh * inw;
		for (int c = 0; c < maps; c++)
		{
			int ocstride = c * outd * outh * outw;
			int icstride = c * ind * inh * inw;
			float val = outgrad[obstride + ocstride + outz * outh * outw + outy * outw + outx];
			atomicAdd(&ingrad[ibstride+icstride + d1 * inh*inw + h1 * inw + w1], dd0 * dh0 * dw0 * val);
			atomicAdd(&ingrad[ibstride+icstride + d1 * inh*inw + h1 * inw + w1+w1p], dd0 * dh0 * dw1 * val);
			atomicAdd(&ingrad[ibstride+icstride + d1 * inh*inw + (h1+h1p) * inw + w1], dd0 * dh1 * dw0 * val);
			atomicAdd(&ingrad[ibstride+icstride + d1 * inh*inw + (h1+h1p) * inw + w1+w1p], dd0 * dh1 * dw1 * val);
			atomicAdd(&ingrad[ibstride+icstride + (d1+d1p) * inh*inw + h1 * inw + w1], dd1 * dh0 * dw0 * val);
			atomicAdd(&ingrad[ibstride+icstride + (d1+d1p) * inh*inw + h1 * inw + w1+w1p], dd1 * dh0 * dw1 * val);
			atomicAdd(&ingrad[ibstride+icstride + (d1+d1p) * inh*inw + (h1+h1p) * inw + w1], dd1 * dh1 * dw0 * val);
			atomicAdd(&ingrad[ibstride+icstride + (d1+d1p) * inh*inw + (h1+h1p) * inw + w1+w1p], dd1 * dh1 * dw1 * val);
		}
	}
}
""")
class UpsampleModule:
	"""GPU upsampling (2d/3d, nearest or linear) built on a PuzzleLib
	backend: compiles the kernel templates once and exposes forward and
	backward entry points."""
	def __init__(self, backend):
		self.backend, self.GPUArray = backend, backend.GPUArray
		self.warpSize, self.nthreads = backend.warpSize, backend.nthreads
		# Tile shape baked into the nearest-neighbour kernels' shared memory.
		self.hblocksize, self.wblocksize = 4, self.warpSize
		self.nearestMod = backend.SourceModule(upsampleNearestTmpl.substitute(
			hBlockSize=self.hblocksize, wBlockSize=self.wblocksize
		))
		self.linearMod = backend.SourceModule(upsampleLinearTmpl.substitute())
	def upsample2d(self, data, scale, mode="nearest", allocator=None):
		"""Upsample NCHW `data` by `scale` (int or (hscale, wscale)).
		Raises NotImplementedError for an unknown mode."""
		batchsize, maps, inh, inw = data.shape
		hscale, wscale = (scale, scale) if isinstance(scale, int) else scale
		outh, outw = hscale * inh, wscale * inw
		outdata = self.GPUArray.empty((batchsize, maps, outh, outw), dtype=data.dtype, allocator=allocator)
		if mode == "nearest":
			# One thread per input pixel; each writes its hscale*wscale tile.
			block = (self.wblocksize, self.hblocksize, 1)
			grid = (roundUpDiv(inw, block[0]), roundUpDiv(inh, block[1]), batchsize * maps)
			self.nearestMod.upsample2dNearest(
				outdata, data, np.int32(inh), np.int32(inw), np.int32(outh), np.int32(outw),
				np.int32(hscale), np.int32(wscale), block=block, grid=grid
			)
		elif mode == "linear":
			# One thread per output pixel; rh/rw map output to input coords.
			block = (self.warpSize, self.nthreads // self.warpSize, 1)
			grid = (roundUpDiv(outw, block[0]), roundUpDiv(outh, block[1]), 1)
			rh, rw = (inh - 1) / (outh - 1), (inw - 1) / (outw - 1)
			self.linearMod.upsample2dLinear(
				outdata, data, np.int32(batchsize), np.int32(maps), np.int32(inh), np.int32(inw),
				np.int32(outh), np.int32(outw), np.float32(rh), np.float32(rw), block=block, grid=grid
			)
		else:
			raise NotImplementedError(mode)
		return outdata
	def upsample2dBackward(self, grad, scale, mode="nearest", allocator=None):
		"""Gradient of upsample2d: sum each output tile back onto its
		source input pixel (linear mode scatters with atomics)."""
		batchsize, maps, outh, outw = grad.shape
		hscale, wscale = (scale, scale) if isinstance(scale, int) else scale
		inh, inw = outh // hscale, outw // wscale
		if mode == "nearest":
			ingrad = self.GPUArray.empty((batchsize, maps, inh, inw), dtype=grad.dtype, allocator=allocator)
			blk = self.warpSize * 8
			block = (blk, 1, 1)
			grid = (roundUpDiv(ingrad.size, blk), 1, 1)
			self.nearestMod.upsample2dNearestBackward(
				ingrad, grad, np.int32(inw), np.int32(outw), np.int32(hscale), np.int32(wscale), np.int32(ingrad.size),
				block=block, grid=grid
			)
		elif mode == "linear":
			# Must be zero-initialised: the kernel accumulates via atomicAdd.
			ingrad = self.GPUArray.zeros((batchsize, maps, inh, inw), dtype=grad.dtype, allocator=allocator)
			block = (self.warpSize, self.nthreads // self.warpSize, 1)
			grid = (roundUpDiv(outw, block[0]), roundUpDiv(outh, block[1]), 1)
			rh, rw = (inh - 1) / (outh - 1), (inw - 1) / (outw - 1)
			self.linearMod.upsample2dLinearBackward(
				ingrad, grad, np.int32(batchsize), np.int32(maps), np.int32(inh), np.int32(inw),
				np.int32(outh), np.int32(outw), np.float32(rh), np.float32(rw), block=block, grid=grid
			)
		else:
			raise NotImplementedError(mode)
		return ingrad
	def upsample3d(self, data, scale, mode="nearest", allocator=None):
		"""Upsample NCDHW `data` by `scale` (int or (dscale, hscale, wscale))."""
		batchsize, maps, ind, inh, inw = data.shape
		dscale, hscale, wscale = (scale, scale, scale) if isinstance(scale, int) else scale
		outd, outh, outw = dscale * ind, hscale * inh, wscale * inw
		outdata = self.GPUArray.empty((batchsize, maps, outd, outh, outw), dtype=data.dtype, allocator=allocator)
		if mode == "nearest":
			block = (self.wblocksize, self.hblocksize, 1)
			grid = (roundUpDiv(inw, block[0]), roundUpDiv(inh, block[1]), batchsize * maps * ind)
			self.nearestMod.upsample3dNearest(
				outdata, data, np.int32(ind), np.int32(inh), np.int32(inw),
				np.int32(outd), np.int32(outh), np.int32(outw), np.int32(dscale), np.int32(hscale), np.int32(wscale),
				block=block, grid=grid
			)
		elif mode == "linear":
			block = (self.warpSize, self.nthreads // self.warpSize, 1)
			grid = (roundUpDiv(outw, block[0]), roundUpDiv(outh, block[1]), outd)
			rd, rh, rw = (ind - 1) / (outd - 1), (inh - 1) / (outh - 1), (inw - 1) / (outw - 1)
			self.linearMod.upsample3dLinear(
				outdata, data, np.int32(batchsize), np.int32(maps), np.int32(ind), np.int32(inh), np.int32(inw),
				np.int32(outd), np.int32(outh), np.int32(outw), np.float32(rd), np.float32(rh), np.float32(rw),
				block=block, grid=grid
			)
		else:
			raise NotImplementedError(mode)
		return outdata
	def upsample3dBackward(self, grad, scale, mode="nearest", allocator=None):
		"""Gradient of upsample3d; see upsample2dBackward for the scheme."""
		batchsize, maps, outd, outh, outw = grad.shape
		dscale, hscale, wscale = (scale, scale, scale) if isinstance(scale, int) else scale
		ind, inh, inw = outd // dscale, outh // hscale, outw // wscale
		if mode == "nearest":
			ingrad = self.GPUArray.empty((batchsize, maps, ind, inh, inw), dtype=grad.dtype, allocator=allocator)
			blk = self.warpSize * 8
			block = (blk, 1, 1)
			grid = (roundUpDiv(ingrad.size, blk), 1, 1)
			self.nearestMod.upsample3dNearestBackward(
				ingrad, grad, np.int32(inh), np.int32(inw), np.int32(outh), np.int32(outw),
				np.int32(dscale), np.int32(hscale), np.int32(wscale), np.int32(ingrad.size), block=block, grid=grid
			)
		elif mode == "linear":
			# Zero-initialised accumulator for the atomicAdd scatter.
			ingrad = self.GPUArray.zeros((batchsize, maps, ind, inh, inw), dtype=grad.dtype, allocator=allocator)
			block = (self.warpSize, self.nthreads // self.warpSize, 1)
			grid = (roundUpDiv(outw, block[0]), roundUpDiv(outh, block[1]), outd)
			rd, rh, rw = (ind - 1) / (outd - 1), (inh - 1) / (outh - 1), (inw - 1) / (outw - 1)
			self.linearMod.upsample3dLinearBackward(
				ingrad, grad, np.int32(batchsize), np.int32(maps), np.int32(ind), np.int32(inh), np.int32(inw),
				np.int32(outd), np.int32(outh), np.int32(outw), np.float32(rd), np.float32(rh), np.float32(rw),
				block=block, grid=grid
			)
		else:
			raise NotImplementedError(mode)
		return ingrad
def unittest():
	# Self-test entry point: run the whole suite on the CUDA backend.
	from PuzzleLib.Cuda import Backend
	backendTest(Backend)
def backendTest(Backend):
	"""Run every upsampling test on each device exposed by the backend."""
	cases = (
		upsample2dNearestTest, upsample2dLinearTest, upsample2dSpeedTest,
		upsample3dNearestTest, upsample3dLinearTest, upsample3dSpeedTest
	)
	for index in range(Backend.getDeviceCount()):
		module = UpsampleModule(Backend.getBackend(index))
		for case in cases:
			case(module)
def upsample2dNearestTest(module):
	"""Check 2d nearest upsampling and its backward pass against a naive
	numpy reference."""
	batchsize, maps, inh, inw = 1, 2, 16, 15
	scale = 2
	hostData = np.random.uniform(low=-1.0, high=1.0, size=(batchsize, maps, inh, inw)).astype(np.float32)
	data = module.GPUArray.toGpu(hostData)
	outdata = module.upsample2d(data, scale, mode="nearest")
	# Reference forward: each input pixel fills a scale x scale tile.
	hostOutData = np.empty(outdata.shape, dtype=np.float32)
	for b, c, y, x in itertools.product(range(batchsize), range(maps), range(inh), range(inw)):
		hostOutData[b, c, y * scale:(y + 1) * scale, x * scale:(x + 1) * scale] = hostData[b, c, y, x]
	assert np.allclose(hostOutData, outdata.get())
	hostGrad = np.random.randn(*outdata.shape).astype(np.float32)
	grad = module.GPUArray.toGpu(hostGrad)
	ingrad = module.upsample2dBackward(grad, scale)
	# Reference backward: each input pixel sums the gradients of its tile.
	hostInGrad = np.zeros(data.shape, dtype=np.float32)
	for b, c, y, x, dy, dx in itertools.product(
		range(batchsize), range(maps), range(inh), range(inw), range(scale), range(scale)
	):
		hostInGrad[b, c, y, x] += hostGrad[b, c, y * scale + dy, x * scale + dx]
	assert np.allclose(hostInGrad, ingrad.get(), atol=1e-5)
def upsample2dLinearTest(module):
	"""Check 2d bilinear upsampling and its backward pass against a naive
	numpy reference (align_corners-style coordinate mapping)."""
	batchsize, maps, inh, inw = 3, 2, 4, 4
	hscale, wscale = 2, 3
	hostData = np.random.randn(batchsize, maps, inh, inw).astype(np.float32)
	data = module.GPUArray.toGpu(hostData)
	outdata = module.upsample2d(data, (hscale, wscale), mode="linear")
	hostOutData = np.zeros(outdata.shape, dtype=np.float32)
	# Same output->input coordinate ratios the kernel uses.
	rh, rw = (inh - 1) / (inh * hscale - 1), (inw - 1) / (inw * wscale - 1)
	for b, c, y, x, in itertools.product(range(batchsize), range(maps), range(inh * hscale), range(inw * wscale)):
		iny, inx = int(rh * y), int(rw * x)
		dy, dx = 1.0 - (rh * y - iny), 1.0 - (rw * x - inx)
		# yi/xi clamp the neighbour index at the right/bottom border.
		yi, xi = 1 if y < inh * hscale - 1 else 0, 1 if x < inw * wscale - 1 else 0
		hostOutData[b, c, y, x] = dy * (dx * hostData[b, c, iny, inx] + (1 - dx) * hostData[b, c, iny, inx + xi]) + \
								  (1 - dy) * (dx * hostData[b, c, iny + yi, inx] +
								  (1 - dx) * hostData[b, c, iny + yi, inx + xi])
	hostGrad = np.random.randn(*outdata.shape).astype(np.float32)
	grad = module.GPUArray.toGpu(hostGrad)
	ingrad = module.upsample2dBackward(grad, (hscale, wscale), mode="linear")
	# Reference backward: scatter each output gradient onto its four
	# interpolation sources with the same weights.
	hostInGrad = np.zeros(data.shape, dtype=np.float32)
	for b, c, y, x in itertools.product(range(batchsize), range(maps), range(inh * hscale), range(inw * wscale)):
		iny, inx = int(rh * y), int(rw * x)
		dy, dx = 1.0 - (rh * y - iny), 1.0 - (rw * x - inx)
		yi, xi = 1 if y < inh * hscale - 1 else 0, 1 if x < inw * wscale - 1 else 0
		val = hostGrad[b, c, y, x]
		hostInGrad[b, c, iny, inx] += dy * dx * val
		hostInGrad[b, c, iny, inx + xi] += dy * (1 - dx) * val
		hostInGrad[b, c, iny + yi, inx] += (1 - dy) * dx * val
		hostInGrad[b, c, iny + yi, inx + xi] += (1 - dy) * (1 - dx) * val
	assert np.allclose(hostInGrad, ingrad.get(), atol=1e-5)
def upsample3dNearestTest(module):
	"""Check 3d nearest upsampling and its backward pass against a naive
	numpy reference."""
	batchsize, maps, ind, inh, inw = 4, 2, 3, 5, 3
	scale = 2
	hostData = np.random.randn(batchsize, maps, ind, inh, inw).astype(np.float32)
	data = module.GPUArray.toGpu(hostData)
	outdata = module.upsample3d(data, scale, mode="nearest")
	# Reference forward: each input voxel fills a scale^3 block.
	hostOutData = np.empty(outdata.shape, dtype=np.float32)
	for b, c, z, y, x in itertools.product(range(batchsize), range(maps), range(ind), range(inh), range(inw)):
		hostOutData[b, c, z * scale:(z + 1) * scale, y * scale:(y + 1) * scale, x * scale:(x + 1) * scale] = \
			hostData[b, c, z, y, x]
	assert np.allclose(hostOutData, outdata.get())
	hostGrad = np.random.randn(*outdata.shape).astype(np.float32)
	grad = module.GPUArray.toGpu(hostGrad)
	ingrad = module.upsample3dBackward(grad, scale)
	# Reference backward: each input voxel sums its block's gradients.
	hostInGrad = np.zeros(data.shape, dtype=np.float32)
	for b, c, z, y, x, dz, dy, dx in itertools.product(
		range(batchsize), range(maps), range(ind), range(inh), range(inw), range(scale), range(scale), range(scale)
	):
		hostInGrad[b, c, z, y, x] += hostGrad[b, c, z * scale + dz, y * scale + dy, x * scale + dx]
	assert np.allclose(hostInGrad, ingrad.get())
def upsample3dLinearTest(module):
	"""Check 3d trilinear upsampling and its backward pass against a naive
	numpy reference.
	NOTE(review): uses ind == inh == inw == 2, so depth-stride mistakes of
	the form inw*inw vs inh*inw in the kernel are invisible here -- consider
	unequal spatial sizes."""
	batchsize, maps, ind, inh, inw = 1, 2, 2, 2, 2
	dscale, hscale, wscale = 2, 2, 1
	hostData = np.random.randn(batchsize, maps, ind, inh, inw).astype(np.float32)
	data = module.GPUArray.toGpu(hostData)
	outdata = module.upsample3d(data, (dscale, hscale, wscale), mode="linear")
	hostOutData = np.zeros(outdata.shape, dtype=np.float32)
	# Same output->input coordinate ratios the kernel uses.
	rd, rh, rw = (ind - 1) / (ind * dscale - 1), (inh - 1) / (inh * hscale - 1), (inw - 1) / (inw * wscale - 1)
	for b, c, z, y, x in itertools.product(
		range(batchsize), range(maps), range(ind * dscale), range(inh * hscale), range(inw * wscale)
	):
		inz, iny, inx = int(rd * z), int(rh * y), int(rw * x)
		dz, dy, dx = 1.0 - (rd * z - inz), 1.0 - (rh * y - iny), 1.0 - (rw * x - inx)
		# zi/yi/xi clamp the neighbour index at the far border.
		zi = 1 if z < ind * dscale - 1 else 0
		yi = 1 if y < inh * hscale - 1 else 0
		xi = 1 if x < inw * wscale - 1 else 0
		hostOutData[b, c, z, y, x] = dz * (dy * (
			dx * hostData[b, c, inz, iny, inx] + (1 - dx) * hostData[b, c, inz, iny, inx + xi]
		) + (1 - dy) * (
			dx * hostData[b, c, inz, iny + yi, inx] + (1 - dx) * hostData[b, c, inz, iny + yi, inx + xi]
		)) + (1 - dz) * (dy * (
			dx * hostData[b, c, inz+zi, iny, inx] + (1 - dx) * hostData[b, c, inz + zi, iny, inx + xi]
		) + (1 - dy) * (
			dx * hostData[b, c, inz + zi, iny + yi, inx] + (1 - dx) * hostData[b, c, inz + zi, iny + yi, inx + xi]
		))
	assert np.allclose(hostOutData, outdata.get())
	hostGrad = np.random.randn(*outdata.shape).astype(np.float32)
	grad = module.GPUArray.toGpu(hostGrad)
	ingrad = module.upsample3dBackward(grad, (dscale, hscale, wscale), mode="linear")
	# Reference backward: scatter onto the eight interpolation sources.
	hostInGrad = np.zeros(data.shape, dtype=np.float32)
	for b, c, z, y, x in itertools.product(
		range(batchsize), range(maps), range(ind * dscale), range(inh * hscale), range(inw * wscale)
	):
		inz, iny, inx = int(rd * z), int(rh * y), int(rw * x)
		dz, dy, dx = 1.0 - (rd * z - inz), 1.0 - (rh * y - iny), 1.0 - (rw * x - inx)
		zi = 1 if z < ind * dscale - 1 else 0
		yi = 1 if y < inh * hscale - 1 else 0
		xi = 1 if x < inw * wscale - 1 else 0
		val = hostGrad[b, c, z, y, x]
		hostInGrad[b, c, inz, iny, inx] += dz * dy * dx * val
		hostInGrad[b, c, inz, iny, inx + xi] += dz * dy * (1 - dx) * val
		hostInGrad[b, c, inz, iny + yi, inx] += dz * (1 - dy) * dx * val
		hostInGrad[b, c, inz, iny + yi, inx + xi] += dz * (1 - dy) * (1 - dx) * val
		hostInGrad[b, c, inz + zi, iny, inx] += (1 - dz) * dy * dx * val
		hostInGrad[b, c, inz + zi, iny, inx + xi] += (1 - dz) * dy * (1 - dx) * val
		hostInGrad[b, c, inz + zi, iny + yi, inx] += (1 - dz) * (1 - dy) * dx * val
		hostInGrad[b, c, inz + zi, iny + yi, inx + xi] += (1 - dz) * (1 - dy) * (1 - dx) * val
	assert np.allclose(hostInGrad, ingrad.get())
def upsample2dSpeedTest(module):
    """Benchmark both 2d upsampling kernels (nearest and linear) on a fixed batch."""
    shape = (32, 16, 32, 32)  # (batch, maps, height, width)
    scale = 2
    data = module.GPUArray.toGpu(np.random.randn(*shape).astype(np.float32))
    bnd = module.backend
    for mode in ("nearest", "linear"):
        bnd.timeKernel(
            module.upsample2d,
            args=(data, scale, mode, bnd.memoryPool),
            logname=mode + " 2d mode",
        )
def upsample3dSpeedTest(module):
    """Benchmark both 3d upsampling kernels (nearest and linear) on a fixed batch."""
    shape = (32, 16, 4, 32, 32)  # (batch, maps, depth, height, width)
    scale = 2
    data = module.GPUArray.toGpu(np.random.randn(*shape).astype(np.float32))
    bnd = module.backend
    for mode in ("nearest", "linear"):
        bnd.timeKernel(
            module.upsample3d,
            args=(data, scale, mode, bnd.memoryPool),
            logname=mode + " 3d mode",
        )
if __name__ == "__main__":
    # Run the module's kernel test suite when executed as a script.
    # NOTE(review): `unittest` here is presumably a function defined earlier in
    # this file, not the stdlib module — confirm before any rename.
    unittest()
| StarcoderdataPython |
5035338 | <reponame>Staberinde/data-hub-api
# Generated by Django 3.2.6 on 2021-08-16 16:01
from django.db import migrations
class Migration(migrations.Migration):
    """Point the omis_quote models at the dashed 'omis-quote_*' table names."""

    dependencies = [
        ('omis_quote', '0008_update_permissions_django_21'),
    ]

    operations = [
        # Only the db_table option changes; model fields are untouched.
        migrations.AlterModelTable(
            name='quote',
            table='omis-quote_quote',
        ),
        migrations.AlterModelTable(
            name='termsandconditions',
            table='omis-quote_termsandconditions',
        ),
    ]
| StarcoderdataPython |
1683577 | <filename>blog/admin.py
from django.contrib import admin
from blog.models import Category, Post, Comment
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Django admin configuration for blog categories."""

    list_display = ['id', 'name', 'created_at', 'updated_at', 'is_published']
    list_display_links = ['id', 'name', ]
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Django admin configuration for blog posts.

    Author and category are shown through computed columns so the change
    list degrades gracefully when either foreign key is unset.
    """

    list_display = [
        'id', 'get_category', 'title', 'get_author_name',
        'created_at', 'updated_at', 'is_published',
    ]
    list_display_links = ['id', 'title', ]

    def get_author_name(self, obj):
        # Username of the post's author, or None for authorless posts.
        if not obj.author:
            return None
        return obj.author.username

    get_author_name.admin_order_field = 'author'
    get_author_name.short_description = 'Author Name'

    def get_category(self, obj):
        # Name of the post's category, or None for uncategorized posts.
        if not obj.category:
            return None
        return obj.category.name

    get_category.admin_order_field = 'category'
    get_category.short_description = 'Category'
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Django admin configuration for comments.

    Columns: id, a truncated body, then every remaining model field except
    the raw body and the post foreign key.
    """

    list_display = ['id', 'short_body'] + [
        field.name
        for field in Comment._meta.get_fields()
        if field.name not in ('id', 'body', 'post')
    ]
    list_display_links = ['short_body']
| StarcoderdataPython |
3448529 | """
Preprocess small barriers into data needed by API and tippecanoe for creating vector tiles.
This is run AFTER `preprocess_road_crossings.py`.
Inputs:
* Small barriers inventory from SARP, including all network metrics and summary unit IDs (HUC12, ECO3, ECO4, State, County, etc).
* `road_crossings.csv` created using `preprocess_road_crossings.py`
Outputs:
* `small_barriers.feather`: processed small barriers data for use by the API
* `barriers_with_networks.csv`: barriers with networks for creating vector tiles in tippecanoe
* `barriers_background.csv`: barriers without networks, combined with road / stream crossings, for creating vector tiles in tippecanoe
"""
import os
import sys
import csv
from time import time
import pandas as pd
import geopandas as gp
# Lazy way to import from calculate tiers from a shared file, this allows us to import
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from api.calculate_tiers import calculate_tiers, SCENARIOS
from api.domains import (
STATE_FIPS_DOMAIN,
HUC6_DOMAIN,
BARRIER_CONDITION_TO_DOMAIN,
POTENTIAL_TO_SEVERITY,
ROAD_TYPE_TO_DOMAIN,
CROSSING_TYPE_TO_DOMAIN,
)
from classify import (
classify_gainmiles,
classify_sinuosity,
classify_landcover,
classify_rarespp,
classify_streamorder,
)
start = time()

print("Reading source FGDB dataset")

df = gp.read_file(
    "data/src/Road_Related_Barriers_DraftOne_Final.gdb",
    layer="Road_Barriers_WebViewer_DraftOne_ALL_12132018",
)

# Filter out any dams that do not have a HUC12 or State (currently none filtered)
df = df.loc[df.HUC12.notnull() & df.STATE_FIPS.notnull()].copy()

# Per instructions from SARP, drop all Potential_Project=='SRI Only'
df = df.loc[df.Potential_Project != "SRI Only"].copy()

# Assign an ID. Note: this is ONLY valid for this exact version of the inventory
# (ids are positional, so re-running on a different extract changes them).
df["id"] = df.index.values.astype("uint32")

print("Projecting to WGS84 and adding lat / lon fields")

if not df.crs:
    # set projection on the data using Proj4 syntax, since GeoPandas doesn't
    # always recognize its EPSG Code.
    # It is in Albers (EPSG:102003): https://epsg.io/102003
    df.crs = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs"

# Project to WGS84
df = df.to_crs(epsg=4326)

# Add lat / lon columns from projected geometry
df["lon"] = df.geometry.x.astype("float32")
df["lat"] = df.geometry.y.astype("float32")

# drop geometry, no longer needed
df = df.drop(columns=["geometry"])

# Rename ecoregion columns
df.rename(columns={"NA_L3CODE": "ECO3", "US_L4CODE": "ECO4"}, inplace=True)

# Rename all columns that have underscores (e.g. STATE_FIPS -> STATEFIPS)
df.rename(
    columns={c: c.replace("_", "") for c in df.columns[df.columns.str.count("_") > 0]},
    inplace=True,
)

# Rename columns to make them easier to handle
# also rename fields to match dams for consistency
df.rename(
    columns={
        "STATE": "State",
        "OnConservationLand": "ProtectedLand",
        "NumberRareSpeciesHUC12": "RareSpp",
        "TownId": "County",
        # Note re: SARPID - this isn't quite correct but needed for consistency
        "AnalysisId": "SARPID",
        "AbsoluteGainMi": "GainMiles",
        "PctNatFloodplain": "Landcover",
        "NetworkSinuosity": "Sinuosity",
        "NumSizeClassesGained": "SizeClasses",
        "CrossingTypeId": "CrossingType",
        "RoadTypeId": "RoadType",
        "CrossingConditionId": "Condition",
        "StreamName": "Stream",
    },
    inplace=True,
)

# Flag if the field has a network: a null GainMiles or a "No Upstream Channel"
# project means no usable network analysis exists for the barrier.
# TODO: remove no-upstream from network analysis
df["HasNetwork"] = ~(
    df.GainMiles.isnull() | (df.PotentialProject == "No Upstream Channel")
)

######### Fix data issues

# Join in state from FIPS due to data issue with values in State field (many are missing)
df.State = df.STATEFIPS.map(STATE_FIPS_DOMAIN)

# Fix COUNTYFIPS: restore leading 0's and convert to a 5-character string
df.COUNTYFIPS = df.COUNTYFIPS.astype("int").astype(str).str.pad(5, fillchar="0")

# Drop ' County' from County field
df.County = df.County.fillna("").str.replace(" County", "")

# Fix mixed casing of values; empty strings also become "Unknown"
for column in ("CrossingType", "RoadType", "Stream", "Road"):
    df[column] = df[column].fillna("Unknown").str.title().str.strip()
    df.loc[df[column].str.len() == 0, column] = "Unknown"

# Fix line returns in stream name and road name
df.loc[df.SARPID == "sm7044", "Stream"] = "Unnamed"
df.Road = df.Road.str.replace("\r\n", "")

# Fix issues with RoadType
df.loc[df.RoadType.isin(("No Data", "NoData", "Nodata")), "RoadType"] = "Unknown"

# Fix issues with Condition
df.Condition = df.Condition.fillna("Unknown")
df.loc[
    (df.Condition == "No Data")
    | (df.Condition == "No data")
    | (df.Condition.str.strip().str.len() == 0),
    "Condition",
] = "Unknown"
######### Fill NaN fields and set data types

for column in ("CrossingCode", "LocalID", "Source"):
    df[column] = df[column].fillna("").str.strip()

for column in ("RareSpp", "ProtectedLand"):
    df[column] = df[column].fillna(0).astype("uint8")

# Fill metrics with -1 (sentinel for "no data")
for column in ("Landcover", "SizeClasses"):  # null but with network should be 0
    df[column] = df[column].fillna(-1).astype("int8")

# Round floating point columns to 3 decimals; -1 is the "no data" sentinel
for column in (
    "Sinuosity",
    "GainMiles",
    "UpstreamMiles",
    "DownstreamMiles",
    "TotalNetworkMiles",
):
    df[column] = df[column].round(3).fillna(-1).astype("float32")

######## Calculate derived fields
print("Calculating derived values")

# Construct a name from Stream and Road; only named when BOTH are known
df["Name"] = ""  # "Unknown Crossing"
df.loc[(df.Stream != "Unknown") & (df.Road != "Unknown"), "Name"] = (
    df.Stream + " / " + df.Road + " Crossing"
)
# df.loc[(df.Stream != "Unknown") & (df.Road == "Unknown"), "Name"] = (
#     df.Stream + " / Unknown Road Crossing"
# )
# df.loc[(df.Stream == "Unknown") & (df.Road != "Unknown"), "Name"] = (
#     " Unknown Stream / " + df.Road + " Crossing"
# )

# Calculate HUC and Ecoregion codes (HUC6/HUC8 are prefixes of HUC12)
df["HUC6"] = df["HUC12"].str.slice(0, 6)  # basin
df["HUC8"] = df["HUC12"].str.slice(0, 8)  # subbasin
df["Basin"] = df.HUC6.map(HUC6_DOMAIN)

# Map free-text fields into numeric domain classes used by the frontend filters
df["ConditionClass"] = df.Condition.map(BARRIER_CONDITION_TO_DOMAIN)
df["SeverityClass"] = df.PotentialProject.map(POTENTIAL_TO_SEVERITY)
df["CrossingTypeClass"] = df.CrossingType.map(CROSSING_TYPE_TO_DOMAIN)
df["RoadTypeClass"] = df.RoadType.map(ROAD_TYPE_TO_DOMAIN)

# Bin metrics
df["GainMilesClass"] = classify_gainmiles(df.GainMiles)
df["SinuosityClass"] = classify_sinuosity(df.Sinuosity)
df["LandcoverClass"] = classify_landcover(df.Landcover)
df["RareSppClass"] = classify_rarespp(df.RareSpp)

########## Drop unnecessary columns
df = df[
    [
        "id",
        "lat",
        "lon",
        # ID and source info
        "SARPID",
        "CrossingCode",
        "LocalID",
        "Source",
        # Basic info
        "Name",
        "County",
        "State",
        "Basin",
        # Species info
        "RareSpp",
        # Stream info
        "Stream",
        # Road info
        "Road",
        "RoadType",
        # Location info
        "ProtectedLand",
        # "HUC2",
        "HUC6",
        "HUC8",
        "HUC12",
        "ECO3",
        "ECO4",
        # Barrier info
        "CrossingType",
        "Condition",
        "PotentialProject",
        # Metrics
        "GainMiles",
        "UpstreamMiles",
        "DownstreamMiles",
        "TotalNetworkMiles",
        "Landcover",
        "Sinuosity",
        "SizeClasses",
        # Internal fields
        "COUNTYFIPS",
        "STATEFIPS",
        "HasNetwork",
        "RareSppClass",
        "GainMilesClass",
        "SinuosityClass",
        "LandcoverClass",
        "ConditionClass",
        "SeverityClass",
        "CrossingTypeClass",
        "RoadTypeClass",
    ]
].set_index("id", drop=False)

# Calculate tiers and scores for the region (None) and State levels
for group_field in (None, "State"):
    print("Calculating tiers for {}".format(group_field or "Region"))

    # Note: some states do not yet have enough inventoried barriers for percentiles to work
    tiers_df = calculate_tiers(
        df.loc[df.HasNetwork],
        SCENARIOS,
        group_field=group_field,
        prefix="SE" if group_field is None else group_field,
        percentiles=False,
        topn=False,
    )
    df = df.join(tiers_df)

    # Fill n/a with -1 for tiers and cast columns to integers
    df[tiers_df.columns] = df[tiers_df.columns].fillna(-1)
    for col in tiers_df.columns:
        if col.endswith("_tier") or col.endswith("_p"):
            df[col] = df[col].astype("int8")
        elif col.endswith("_top"):
            df[col] = df[col].astype("int16")
        elif col.endswith("_score"):
            # Convert to a 100% scale
            df[col] = (df[col] * 100).round().astype("uint16")

# Tiers are used to display the given barrier on a relative scale
# compared to other barriers in the state and region.
# Drop associated raw scores, they are not currently displayed on frontend.
df = df.drop(columns=[c for c in df.columns if c.endswith("_score")])
######## Export data for API
print("Writing to files")

# Feather is the binary format read by the API server
df.reset_index(drop=True).to_feather("data/derived/small_barriers.feather")

# For QA and data exploration only
df.to_csv("data/derived/small_barriers.csv", index=False)

######## Export data for tippecanoe
# create duplicate columns for those dropped by tippecanoe
# tippecanoe will use these ones and leave lat / lon
# so that we can use them for display in the frontend
df["latitude"] = df.lat
df["longitude"] = df.lon

# Drop columns that are not used in vector tiles
df = df.drop(columns=["Sinuosity", "STATEFIPS", "CrossingCode", "LocalID"])

# Rename columns for easier use
# (note: the binned SinuosityClass takes over the "Sinuosity" name in the tiles)
df.rename(
    columns={
        "County": "CountyName",
        "COUNTYFIPS": "County",
        "SinuosityClass": "Sinuosity",
    },
    inplace=True,
)

# lowercase all fields except those for unit IDs
df.rename(
    columns={
        k: k.lower()
        for k in df.columns
        if k not in ("State", "County", "HUC6", "HUC8", "HUC12", "ECO3", "ECO4")
    },
    inplace=True,
)

# Split datasets based on those that have networks
# This is done to control the size of the dam vector tiles, so that those without
# networks are only used when zoomed in further. Otherwise, the full vector tiles
# get too large, and points that we want to display are dropped by tippecanoe.
# Road / stream crossings are particularly large, so those are merged in below.
df.loc[df.hasnetwork].drop(columns=["hasnetwork"]).to_csv(
    "data/derived/barriers_with_networks.csv", index=False, quoting=csv.QUOTE_NONNUMERIC
)

# Drop columns we don't need from dams that have no networks, since we do not filter or display
# these fields.
no_network = df.loc[~df.hasnetwork].drop(
    columns=[
        "hasnetwork",
        "sinuosity",
        "sizeclasses",
        "upstreammiles",
        "downstreammiles",
        "totalnetworkmiles",
        "gainmiles",
        "landcover",
        "gainmilesclass",
        "raresppclass",
        "landcoverclass",
        "conditionclass",
        "severityclass",
        "crossingtypeclass",
        "roadtypeclass",
        "County",
        "HUC6",
        "HUC8",
        "HUC12",
        "ECO3",
        "ECO4",
    ]
    + [c for c in df.columns if c.endswith("_tier")]
)
# Combine barriers that don't have networks with road / stream crossings
print("Reading stream crossings")

road_crossings = pd.read_csv(
    "data/derived/road_crossings.csv", dtype={"Road": str, "Stream": str, "SARPID": str}
)
road_crossings.Stream = road_crossings.Stream.str.strip()
road_crossings.Road = road_crossings.Road.str.strip()
road_crossings.rename(
    columns={c: c.lower() for c in road_crossings.columns}, inplace=True
)

# Zero out some fields so the crossings line up with the barrier columns
road_crossings["protectedland"] = 0
road_crossings["rarespp"] = 0
road_crossings["name"] = ""
# Only name crossings where both a stream and a road name are present
road_crossings.loc[
    (road_crossings.stream.str.strip().str.len() > 0)
    & (road_crossings.road.str.strip().str.len() > 0),
    "name",
] = (road_crossings.stream + " / " + road_crossings.road)

# pd.concat replaces DataFrame.append, which was deprecated in pandas 1.4
# and removed in pandas 2.0; the arguments are otherwise identical.
combined = pd.concat([no_network, road_crossings], ignore_index=True, sort=False)
combined["id"] = combined.index.values.astype("uint32")

# Fill latitude / longitude columns in again
# (these are missing from road_crossings.csv)
combined["latitude"] = combined.lat
combined["longitude"] = combined.lon

print("Writing combined file")
combined.to_csv(
    "data/derived/barriers_background.csv", index=False, quoting=csv.QUOTE_NONNUMERIC
)

print("Done in {:.2f}".format(time() - start))
| StarcoderdataPython |
1822033 | '''ResNet1d in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] <NAME>, <NAME>, <NAME>, <NAME>
Deep Residual Learning for Image Recognition. arXiv:1512.03385
This is a copy-paste from: https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py
I just changed 2d conv and batch norms by 1d ones
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-convolution residual block for 1d inputs (GroupNorm variant)."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv1d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.GroupNorm(planes // 2, planes)
        self.conv2 = nn.Conv1d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.GroupNorm(planes // 2, planes)
        # Identity shortcut unless the shape changes; then project with a 1x1 conv.
        if stride == 1 and in_planes == self.expansion * planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv1d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(self.expansion * planes),
            )

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block for 1d inputs (GroupNorm variant).

    Bug fix: the final GroupNorm used to be assigned to ``self.bn2`` (silently
    overwriting the middle norm) while ``forward`` referenced ``self.bn3``,
    so every forward pass raised AttributeError. It is now stored as ``bn3``.
    """

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv1d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.GroupNorm(planes//2, planes)
        self.conv2 = nn.Conv1d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.GroupNorm(planes//2, planes)
        self.conv3 = nn.Conv1d(planes, self.expansion*planes, kernel_size=1, bias=False)
        # was: self.bn2 = ... (clobbered the middle norm and left bn3 undefined)
        self.bn3 = nn.GroupNorm(self.expansion*planes//2, self.expansion*planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            # Projection shortcut when the output shape differs from the input.
            self.shortcut = nn.Sequential(
                nn.Conv1d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """1d ResNet over single-channel signals.

    The fully connected head (``linear1``) expects exactly
    ``19968 * block.expansion`` features after the final average pooling,
    so the input signal length is fixed by construction.
    """

    def __init__(self, block, num_blocks, num_classes=3):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv1d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear1 = nn.Linear(19968 * block.expansion, 1024)
        self.linear2 = nn.Linear(1024, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool1d(h, 16)
        h = h.view(h.size(0), -1)
        return self.linear2(self.linear1(h))
class ResNet_Custom(nn.Module):
    """1d ResNet whose head concatenates one extra scalar feature per sample.

    ``forward`` takes the signal plus an ``external`` tensor with a single
    column (``linear2`` expects 1024 + 1 inputs) that is appended to the
    embedding before the final linear layer.
    """

    def __init__(self, block, num_blocks, num_classes=3):
        super(ResNet_Custom, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv1d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear1 = nn.Linear(19968 * block.expansion, 1024)
        self.linear2 = nn.Linear(1024 + 1, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, external):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool1d(h, 16)
        h = h.view(h.size(0), -1)
        h = self.linear1(h)
        # Append the external feature column before the final layer.
        h = torch.cat([h, external], dim=1)
        return self.linear2(h)
# Standard ResNet depths with the default 3-class head.
def ResNet18():
    return ResNet(BasicBlock, [2, 2, 2, 2])


def ResNet34():
    return ResNet(BasicBlock, [3, 4, 6, 3])


def ResNet50():
    return ResNet(Bottleneck, [3, 4, 6, 3])


def ResNet101():
    return ResNet(Bottleneck, [3, 4, 23, 3])


def ResNet152():
    return ResNet(Bottleneck, [3, 8, 36, 3])


# These architectures are for Nanopore Translocation Signal Features Prediction
# In our case we predict the number of translocation events inside a window in a
# trace, hence the single output (num_classes=1).
def ResNet10_Counter():
    return ResNet(BasicBlock, [1, 1, 1, 1], num_classes=1)


def ResNet18_Counter():
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=1)


def ResNet34_Counter():
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=1)


def ResNet50_Counter():
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1)


def ResNet101_Counter():
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=1)


def ResNet152_Counter():
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=1)


# These architectures are for Nanopore Translocation Signal Features Prediction
# In our case we predict Average Duration and Amplitude inside a window of
# translocation events in a trace (two outputs plus one external input feature).
def ResNet10_Custom():
    return ResNet_Custom(BasicBlock, [1, 1, 1, 1], num_classes=2)


def ResNet18_Custom():
    return ResNet_Custom(BasicBlock, [2, 2, 2, 2], num_classes=2)


def ResNet34_Custom():
    return ResNet_Custom(BasicBlock, [3, 4, 6, 3], num_classes=2)


def ResNet50_Custom():
    return ResNet_Custom(Bottleneck, [3, 4, 6, 3], num_classes=2)


def ResNet101_Custom():
    return ResNet_Custom(Bottleneck, [3, 4, 23, 3], num_classes=2)


def ResNet152_Custom():
    return ResNet_Custom(Bottleneck, [3, 8, 36, 3], num_classes=2)
| StarcoderdataPython |
3551476 | from typing import Callable, Dict, List, Optional
import pytest
from web3 import Web3
from web3.contract import Contract
from raiden_contracts.constants import (
CONTRACT_MONITORING_SERVICE,
CONTRACT_ONE_TO_N,
CONTRACT_SECRET_REGISTRY,
CONTRACT_SERVICE_REGISTRY,
CONTRACT_TOKEN_NETWORK,
CONTRACT_TOKEN_NETWORK_REGISTRY,
CONTRACT_USER_DEPOSIT,
LIBRARY_TOKEN_NETWORK_UTILS,
TEST_SETTLE_TIMEOUT_MAX,
TEST_SETTLE_TIMEOUT_MIN,
MessageTypeId,
)
from raiden_contracts.contract_manager import gas_measurements
from raiden_contracts.tests.utils import call_and_transact
from raiden_contracts.tests.utils.blockchain import mine_blocks
from raiden_contracts.tests.utils.constants import DEPLOYER_ADDRESS, SERVICE_DEPOSIT, UINT256_MAX
from raiden_contracts.utils.pending_transfers import get_locked_amount, get_pending_transfers_tree
from raiden_contracts.utils.proofs import sign_one_to_n_iou, sign_reward_proof
from raiden_contracts.utils.type_aliases import BlockExpiration, ChainID, TokenAmount
@pytest.mark.parametrize("version", [None])
def test_gas_json_has_enough_fields(version: Optional[str]) -> None:
    """Check that gas.json contains exactly the expected measurement keys."""
    doc = gas_measurements(version)
    # Must match the full set of measurements produced by the fixtures below.
    keys = {
        "TokenNetworkUtils DEPLOYMENT",
        "CustomToken.mint",
        "CustomToken.approve",
        "CustomToken.transfer",
        "CustomToken.transferFrom",
        "MonitoringService.claimReward",
        "MonitoringService.monitor",
        "OneToN.claim",
        "OneToN.bulkClaim 1 ious",
        "OneToN.bulkClaim 6 ious",
        "SecretRegistry.registerSecret",
        "SecretRegistry.registerSecretBatch1",
        "SecretRegistry.registerSecretBatch2",
        "SecretRegistry.registerSecretBatch3",
        "ServiceRegistry.deposit",
        "ServiceRegistry.setURL",
        "TokenNetwork DEPLOYMENT",
        "TokenNetwork.closeChannel",
        "TokenNetwork.openChannel",
        "TokenNetwork.openChannelWithDeposit",
        "TokenNetwork.setTotalDeposit",
        "TokenNetwork.setTotalWithdraw",
        "TokenNetwork.settleChannel",
        "TokenNetwork.unlock 1 locks",
        "TokenNetwork.unlock 6 locks",
        "TokenNetwork.updateNonClosingBalanceProof",
        "TokenNetworkRegistry DEPLOYMENT",
        "TokenNetworkRegistry createERC20TokenNetwork",
        "UserDeposit.deposit",
        "UserDeposit.deposit (increase balance)",
        "UserDeposit.planWithdraw",
        "UserDeposit.withdraw",
    }
    assert set(doc.keys()) == keys
@pytest.fixture
def print_gas_token_network_utils(
    deploy_tester_contract_txhash: Callable,
    print_gas: Callable,
) -> None:
    """Abusing pytest to print the deployment gas cost of TokenNetworkUtils"""
    txhash = deploy_tester_contract_txhash(LIBRARY_TOKEN_NETWORK_UTILS)
    print_gas(txhash, LIBRARY_TOKEN_NETWORK_UTILS + " DEPLOYMENT")


@pytest.fixture
def print_gas_token_network_registry(
    web3: Web3,
    deploy_tester_contract_txhash: Callable,
    secret_registry_contract: Contract,
    token_network_libs: Dict,
    print_gas: Callable,
) -> None:
    """Abusing pytest to print the deployment gas cost of TokenNetworkRegistry"""
    txhash = deploy_tester_contract_txhash(
        CONTRACT_TOKEN_NETWORK_REGISTRY,
        libs=token_network_libs,
        _secret_registry_address=secret_registry_contract.address,
        _chain_id=web3.eth.chain_id,
        _settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
        _settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
        _max_token_networks=10,
    )
    print_gas(txhash, CONTRACT_TOKEN_NETWORK_REGISTRY + " DEPLOYMENT")


@pytest.fixture
def print_gas_token_network_deployment(
    web3: Web3,
    get_accounts: Callable,
    print_gas: Callable,
    custom_token: Contract,
    token_network_libs: Dict,
    secret_registry_contract: Contract,
    deploy_tester_contract_txhash: Callable,
    channel_participant_deposit_limit: int,
    token_network_deposit_limit: int,
) -> None:
    """Abusing pytest to print the deployment gas cost of TokenNetwork"""
    deprecation_executor = get_accounts(1)[0]
    txhash = deploy_tester_contract_txhash(
        CONTRACT_TOKEN_NETWORK,
        libs=token_network_libs,
        _token_address=custom_token.address,
        _secret_registry=secret_registry_contract.address,
        _chain_id=web3.eth.chain_id,
        _settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
        _settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
        _deprecation_executor=deprecation_executor,
        _channel_participant_deposit_limit=channel_participant_deposit_limit,
        _token_network_deposit_limit=token_network_deposit_limit,
    )
    print_gas(txhash, CONTRACT_TOKEN_NETWORK + " DEPLOYMENT")


@pytest.fixture
def print_gas_token_network_create(
    print_gas: Callable,
    custom_token: Contract,
    get_token_network_registry: Callable,
    channel_participant_deposit_limit: int,
    token_network_deposit_limit: int,
    token_network_registry_constructor_args: Dict,
) -> None:
    """Abusing pytest to print gas cost of TokenNetworkRegistry's createERC20TokenNetwork()"""
    registry = get_token_network_registry(**token_network_registry_constructor_args)
    txn_hash = call_and_transact(
        registry.functions.createERC20TokenNetwork(
            custom_token.address,
            channel_participant_deposit_limit,
            token_network_deposit_limit,
        ),
        {"from": DEPLOYER_ADDRESS},
    )
    print_gas(txn_hash, CONTRACT_TOKEN_NETWORK_REGISTRY + " createERC20TokenNetwork")


@pytest.fixture
def print_gas_secret_registry(secret_registry_contract: Contract, print_gas: Callable) -> None:
    """Abusing pytest to print gas cost of SecretRegistry's registerSecret()"""
    secret = b"secretsecretsecretsecretsecretse"
    txn_hash = call_and_transact(secret_registry_contract.functions.registerSecret(secret))
    print_gas(txn_hash, CONTRACT_SECRET_REGISTRY + ".registerSecret")

    # Batch registration for 1-3 distinct 32-byte secrets.
    for batch_size in (1, 2, 3):
        batch = [bytes([batch_size, i] + [0] * 30) for i in range(batch_size)]
        txn_hash = call_and_transact(secret_registry_contract.functions.registerSecretBatch(batch))
        print_gas(txn_hash, CONTRACT_SECRET_REGISTRY + ".registerSecretBatch" + str(batch_size))
@pytest.fixture
def print_gas_channel_cycle(
    web3: Web3,
    token_network: Contract,
    create_channel: Callable,
    channel_deposit: Callable,
    withdraw_channel: Callable,
    secret_registry_contract: Contract,
    get_accounts: Callable,
    print_gas: Callable,
    create_balance_proof: Callable,
    create_balance_proof_countersignature: Callable,
) -> None:
    """Abusing pytest to print gas costs of TokenNetwork's operations

    Walks a full channel lifecycle (open, deposit, withdraw, close, update,
    settle, unlock) and prints the gas used at each step.
    """
    (A, B, C, D) = get_accounts(4)
    settle_timeout = 11

    # openChannel measured twice (A-B is exercised further below; C-D is open-only)
    (channel_identifier, txn_hash) = create_channel(A, B, settle_timeout)
    print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + ".openChannel")
    (_, txn_hash) = create_channel(C, D, settle_timeout)
    print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + ".openChannel")

    # setTotalDeposit for both participants of the A-B channel
    txn_hash = channel_deposit(channel_identifier, A, 20, B)
    print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + ".setTotalDeposit")
    txn_hash = channel_deposit(channel_identifier, B, 10, A)
    print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + ".setTotalDeposit")

    txn_hash = withdraw_channel(channel_identifier, A, 5, UINT256_MAX, B)
    print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + ".setTotalWithdraw")

    # Build pending-lock trees and matching balance proofs for both sides.
    pending_transfers_tree1 = get_pending_transfers_tree(web3, [1, 1, 2, 3], [2, 1])
    locksroot1 = pending_transfers_tree1.hash_of_packed_transfers
    locked_amount1 = get_locked_amount(pending_transfers_tree1.transfers)

    pending_transfers_tree2 = get_pending_transfers_tree(web3, [3], [], 7)
    locksroot2 = pending_transfers_tree2.hash_of_packed_transfers
    locked_amount2 = get_locked_amount(pending_transfers_tree2.transfers)

    balance_proof_A = create_balance_proof(
        channel_identifier, A, 10, locked_amount1, 5, locksroot1
    )
    balance_proof_update_signature_B = create_balance_proof_countersignature(
        participant=B,
        channel_identifier=channel_identifier,
        msg_type=MessageTypeId.BALANCE_PROOF_UPDATE,
        **balance_proof_A._asdict(),
    )
    balance_proof_B = create_balance_proof(channel_identifier, B, 5, locked_amount2, 3, locksroot2)
    closing_sig_A = create_balance_proof_countersignature(
        participant=A,
        channel_identifier=channel_identifier,
        msg_type=MessageTypeId.BALANCE_PROOF,
        **balance_proof_B._asdict(),
    )

    # registerSecret for every unlockable lock on both trees.
    for lock in pending_transfers_tree1.unlockable:
        txn_hash = call_and_transact(
            secret_registry_contract.functions.registerSecret(lock[3]), {"from": A}
        )
        print_gas(txn_hash, CONTRACT_SECRET_REGISTRY + ".registerSecret")

    for lock in pending_transfers_tree2.unlockable:
        txn_hash = call_and_transact(
            secret_registry_contract.functions.registerSecret(lock[3]), {"from": A}
        )
        print_gas(txn_hash, CONTRACT_SECRET_REGISTRY + ".registerSecret")

    # A closes with B's balance proof; B answers with A's updated proof.
    txn_hash = call_and_transact(
        token_network.functions.closeChannel(
            channel_identifier, B, A, *balance_proof_B._asdict().values(), closing_sig_A
        ),
        {"from": A},
    )
    print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + ".closeChannel")

    txn_hash = call_and_transact(
        token_network.functions.updateNonClosingBalanceProof(
            channel_identifier,
            A,
            B,
            *balance_proof_A._asdict().values(),
            balance_proof_update_signature_B,
        ),
        {"from": B},
    )
    print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + ".updateNonClosingBalanceProof")

    # Settle only after the settlement window has elapsed.
    mine_blocks(web3, settle_timeout)
    txn_hash = call_and_transact(
        token_network.functions.settleChannel(
            channel_identifier,
            B,
            5,
            locked_amount2,
            locksroot2,
            A,
            10,
            locked_amount1,
            locksroot1,
        )
    )
    print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + ".settleChannel")

    # Unlock both sides' pending transfers (1-lock and multi-lock variants).
    txn_hash = call_and_transact(
        token_network.functions.unlock(
            channel_identifier, A, B, pending_transfers_tree2.packed_transfers
        )
    )
    print_gas(
        txn_hash,
        "{0}.unlock {1} locks".format(
            CONTRACT_TOKEN_NETWORK, len(pending_transfers_tree2.transfers)
        ),
    )
    txn_hash = call_and_transact(
        token_network.functions.unlock(
            channel_identifier, B, A, pending_transfers_tree1.packed_transfers
        )
    )
    print_gas(
        txn_hash,
        "{0}.unlock {1} locks".format(
            CONTRACT_TOKEN_NETWORK, len(pending_transfers_tree1.transfers)
        ),
    )
@pytest.fixture
def print_gas_channel_open_with_deposit(
    token_network: Contract,
    get_accounts: Callable,
    print_gas: Callable,
    assign_tokens: Callable,
) -> None:
    """Abusing pytest to print gas costs of TokenNetwork's `openChannelWithDeposit`"""
    (A, B) = get_accounts(2)
    settle_timeout = 11
    deposit = 20
    # Fund A so the deposit transfer inside openChannelWithDeposit can succeed.
    assign_tokens(A, deposit)
    tx_hash = call_and_transact(
        token_network.functions.openChannelWithDeposit(A, B, settle_timeout, deposit),
        {"from": A},
    )
    print_gas(tx_hash, CONTRACT_TOKEN_NETWORK + ".openChannelWithDeposit")
@pytest.fixture
def print_gas_monitoring_service(
    token_network: Contract,
    monitoring_service_external: Contract,
    get_accounts: Callable,
    create_channel: Callable,
    create_balance_proof: Callable,
    create_balance_proof_countersignature: Callable,
    service_registry: Contract,
    custom_token: Contract,
    deposit_to_udc: Callable,
    print_gas: Callable,
    get_private_key: Callable,
    create_service_account: Callable,
    web3: Web3,
) -> None:
    """Abusing pytest to print gas cost of MonitoringService functions"""
    # setup: two parties + MS
    (A, MS) = get_accounts(2)
    B = create_service_account()
    reward_amount = TokenAmount(10)
    # B funds the user deposit contract so the MS reward can be paid out.
    deposit_to_udc(B, reward_amount)

    # register MS in the ServiceRegistry contract
    call_and_transact(custom_token.functions.mint(SERVICE_DEPOSIT * 2), {"from": MS})
    call_and_transact(
        custom_token.functions.approve(service_registry.address, SERVICE_DEPOSIT),
        {"from": MS},
    )
    call_and_transact(service_registry.functions.deposit(SERVICE_DEPOSIT), {"from": MS})

    # open a channel (c1, c2)
    channel_identifier = create_channel(A, B)[0]

    # create balance and reward proofs
    balance_proof_A = create_balance_proof(channel_identifier, B, transferred_amount=10, nonce=1)
    closing_sig_A = create_balance_proof_countersignature(
        participant=A,
        channel_identifier=channel_identifier,
        msg_type=MessageTypeId.BALANCE_PROOF,
        **balance_proof_A._asdict(),
    )
    balance_proof_B = create_balance_proof(channel_identifier, A, transferred_amount=20, nonce=2)
    non_closing_signature_B = create_balance_proof_countersignature(
        participant=B,
        channel_identifier=channel_identifier,
        msg_type=MessageTypeId.BALANCE_PROOF_UPDATE,
        **balance_proof_B._asdict(),
    )
    reward_proof_signature = sign_reward_proof(
        privatekey=get_private_key(B),
        monitoring_service_contract_address=monitoring_service_external.address,
        chain_id=token_network.functions.chain_id().call(),
        token_network_address=token_network.address,
        non_closing_participant=B,
        reward_amount=reward_amount,
        non_closing_signature=non_closing_signature_B,
    )

    # c1 closes channel
    call_and_transact(
        token_network.functions.closeChannel(
            channel_identifier, B, A, *balance_proof_A._asdict().values(), closing_sig_A
        ),
        {"from": A},
    )
    mine_blocks(web3, 4)

    # MS calls `MSC::monitor()` using c1's BP and reward proof
    txn_hash = call_and_transact(
        monitoring_service_external.functions.monitor(
            A,
            B,
            balance_proof_B.balance_hash,
            balance_proof_B.nonce,
            balance_proof_B.additional_hash,
            balance_proof_B.original_signature,
            non_closing_signature_B,  # non-closing signature
            reward_amount,
            token_network.address,  # token network address
            reward_proof_signature,
        ),
        {"from": MS},
    )
    print_gas(txn_hash, CONTRACT_MONITORING_SERVICE + ".monitor")

    mine_blocks(web3, 1)

    # MS claims the reward
    txn_hash = call_and_transact(
        monitoring_service_external.functions.claimReward(
            channel_identifier, token_network.address, A, B
        ),
        {"from": MS},
    )
    print_gas(txn_hash, CONTRACT_MONITORING_SERVICE + ".claimReward")
@pytest.fixture
def print_gas_one_to_n(
    one_to_n_contract: Contract,
    deposit_to_udc: Callable,
    print_gas: Callable,
    make_iou: Callable,
    web3: Web3,
    get_private_key: Callable,
    create_service_account: Callable,
    create_account: Callable,
) -> None:
    """Abusing pytest to print gas cost of OneToN functions.

    Exercises a single ``claim`` (happy case) and then ``bulkClaim`` with
    1 and 6 IOUs so the per-IOU gas overhead can be compared.
    """
    A = create_account()
    B = create_service_account()
    # Fund A's user-deposit so the IOU claims below can be paid out.
    deposit_to_udc(A, 30)
    # happy case
    chain_id = web3.eth.chain_id
    amount = TokenAmount(10)
    # Expires shortly after the current block; the claim happens immediately.
    expiration = BlockExpiration(web3.eth.block_number + 2)
    signature = sign_one_to_n_iou(
        get_private_key(A),
        sender=A,
        receiver=B,
        amount=amount,
        expiration_block=expiration,
        one_to_n_address=one_to_n_contract.address,
        chain_id=ChainID(chain_id),
    )
    txn_hash = call_and_transact(
        one_to_n_contract.functions.claim(
            A, B, amount, expiration, one_to_n_contract.address, signature
        ),
        {"from": A},
    )
    print_gas(txn_hash, CONTRACT_ONE_TO_N + ".claim")

    # bulk claims gas prices
    def concat_iou_data(ious: List[Dict], key: str) -> List:
        # Column-wise extraction: bulkClaim takes parallel arrays.
        return [iou[key] for iou in ious]

    def concat_iou_signatures(ious: List[Dict]) -> bytes:
        # Signatures are passed as one concatenated byte string.
        result = b""
        for iou in ious:
            result += iou["signature"]
        return result

    for num_ious in (1, 6):
        receivers = [create_service_account() for i in range(num_ious)]
        ious = [make_iou(A, r) for r in receivers]
        txn_hash = call_and_transact(
            one_to_n_contract.functions.bulkClaim(
                concat_iou_data(ious, "sender"),
                concat_iou_data(ious, "receiver"),
                concat_iou_data(ious, "amount"),
                concat_iou_data(ious, "expiration_block"),
                one_to_n_contract.address,
                concat_iou_signatures(ious),
            ),
            {"from": A},
        )
        print_gas(txn_hash, CONTRACT_ONE_TO_N + f".bulkClaim {num_ious} ious")
@pytest.fixture
def print_gas_user_deposit(
    user_deposit_contract: Contract,
    custom_token: Contract,
    get_accounts: Callable,
    web3: Web3,
    print_gas: Callable,
) -> None:
    """Abusing pytest to print gas cost of UserDeposit functions

    The `transfer` function is not included because it's only called by trusted
    contracts as part of another function.
    """
    (A,) = get_accounts(1)
    # Mint tokens and allow the user-deposit contract to pull them.
    call_and_transact(custom_token.functions.mint(20), {"from": A})
    call_and_transact(
        custom_token.functions.approve(user_deposit_contract.address, 20), {"from": A}
    )
    # deposit
    txn_hash = call_and_transact(user_deposit_contract.functions.deposit(A, 10), {"from": A})
    print_gas(txn_hash, CONTRACT_USER_DEPOSIT + ".deposit")
    # Second deposit on an existing balance follows a cheaper code path.
    txn_hash = call_and_transact(user_deposit_contract.functions.deposit(A, 20), {"from": A})
    print_gas(txn_hash, CONTRACT_USER_DEPOSIT + ".deposit (increase balance)")
    # plan withdraw
    txn_hash = call_and_transact(user_deposit_contract.functions.planWithdraw(10), {"from": A})
    print_gas(txn_hash, CONTRACT_USER_DEPOSIT + ".planWithdraw")
    # withdraw
    # The withdraw can only execute after the contract's delay has elapsed.
    withdraw_delay = user_deposit_contract.functions.withdraw_delay().call()
    mine_blocks(web3, withdraw_delay)
    txn_hash = call_and_transact(user_deposit_contract.functions.withdraw(10), {"from": A})
    print_gas(txn_hash, CONTRACT_USER_DEPOSIT + ".withdraw")
@pytest.fixture
def print_gas_service_registry(
    custom_token: Contract,
    service_registry: Contract,
    print_gas: Callable,
    create_account: Callable,
) -> None:
    """Abusing pytest to print gas cost of ServiceRegistry deposit/setURL."""
    A = create_account()
    # Registration price is dynamic; query it and mint/approve exactly that.
    deposit = service_registry.functions.currentPrice().call()
    call_and_transact(custom_token.functions.mint(deposit), {"from": A})
    call_and_transact(
        custom_token.functions.approve(service_registry.address, deposit), {"from": A}
    )
    deposit_tx = call_and_transact(service_registry.functions.deposit(deposit), {"from": A})
    print_gas(deposit_tx, CONTRACT_SERVICE_REGISTRY + ".deposit")
    url = "http://example.com"
    set_url_tx = call_and_transact(service_registry.functions.setURL(url), {"from": A})
    print_gas(set_url_tx, CONTRACT_SERVICE_REGISTRY + ".setURL")
@pytest.fixture
def print_gas_token(get_accounts: Callable, custom_token: Contract, print_gas: Callable) -> None:
    """Abusing pytest to print gas cost of basic CustomToken ERC20 calls."""
    (A, B) = get_accounts(2)
    tx_hash = call_and_transact(custom_token.functions.mint(100), {"from": A})
    print_gas(tx_hash, "CustomToken.mint")
    tx_hash = call_and_transact(custom_token.functions.transfer(B, 100), {"from": A})
    print_gas(tx_hash, "CustomToken.transfer")
    # B approves A so the transferFrom below is authorized.
    tx_hash = call_and_transact(custom_token.functions.approve(A, 100), {"from": B})
    print_gas(tx_hash, "CustomToken.approve")
    tx_hash = call_and_transact(custom_token.functions.transferFrom(B, A, 100), {"from": A})
    print_gas(tx_hash, "CustomToken.transferFrom")
# All gas printing is done in a single test. Otherwise, after a parallel
# execution of multiple gas printing tests, you see a corrupted gas.json.
@pytest.mark.slow
@pytest.mark.usefixtures(
    "print_gas_token_network_utils",
    "print_gas_token_network_registry",
    "print_gas_token_network_deployment",
    "print_gas_token_network_create",
    "print_gas_secret_registry",
    "print_gas_channel_cycle",
    "print_gas_channel_open_with_deposit",
    "print_gas_monitoring_service",
    "print_gas_one_to_n",
    "print_gas_service_registry",
    "print_gas_user_deposit",
    "print_gas_token",
)
def test_print_gas() -> None:
    """No-op body: all gas measurement happens in the fixtures listed above."""
    pass
| StarcoderdataPython |
89887 | <gh_stars>0
import logging
import logging.handlers
from katana.shared_utils.kafkaUtils import kafkaUtils
from katana.utils.sliceUtils import sliceUtils
# Logging Parameters
# Root config for this process: rotate the on-disk log at ~10 kB, keep 5
# backups, and mirror everything to the console.
logger = logging.getLogger(__name__)
file_handler = logging.handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
file_handler.setFormatter(formatter)
stream_handler.setFormatter(stream_formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
# Create Kafka topic
kafkaUtils.create_topic()
# Create the Kafka Consumer
consumer = kafkaUtils.create_consumer()
# Check for new messages
# Main service loop: blocks on the consumer forever, dispatching slice
# lifecycle operations.  Messages are dicts with "action" and "message" keys.
for message in consumer:
    logger.info("--- New Message ---")
    logger.info(
        "Topic: {0} | Partition: {1} | Offset: {2}".format(
            message.topic, message.partition, message.offset
        )
    )
    # Commit the latest received message
    consumer.commit()
    action = message.value["action"]
    payload = message.value["message"]
    # Add slice
    if action == "add":
        sliceUtils.add_slice(payload)
    # Delete slice
    elif action == "delete":
        sliceUtils.delete_slice(payload)
    # NOTE(review): any other action value is silently ignored — confirm
    # that is the intended behavior for unknown message types.
| StarcoderdataPython |
276368 | import numbers
import factorial
import fibonacci
import infodb
import tennis
import tree
import facclass
import factorsimp
import factorsclass
import palindrome
border = "=" * 25
banner = f"\n{border}\nPlease Select An Option\n{border}"
# Menu tables consumed by menu(): lists of [label, action] pairs, where each
# action is a zero-argument callable from one of the imported demo modules.
patterns_menu = [
    ["Tree", tree.pattern],
    ["Tennis Animation", tennis.tennis],
]
numbers_menu = [
    ["Number Pad", numbers.numpad],
    ["Number Swap", numbers.numswap],
    ["Info", infodb.information],
]
functions_menu = [
    ["Factorial Class", facclass.facclass],
    ["Factors Imperative", factorsimp.factorsimp],
    ["Factors Class", factorsclass.factorsclass],
    ["Palindrome", palindrome.palindrome],
    ["Factorial", factorial.answer],
    ["Fibonacci", fibonacci.fibonacci],
    ["Fibonacci OOP", fibonacci.testerf],
]
def menu(banner, options):
    """Render a numbered menu, prompt for a choice, and dispatch it.

    ``options`` is a list of ``[label, action]`` pairs, where ``action`` is
    either a zero-argument callable or the path of a script file to exec
    (playground style).  Choice 0 always exits this menu level.

    Fixes over the previous version: invalid choices are now rejected with an
    explicit membership check instead of relying on a chain of
    TypeError/UnboundLocalError exceptions, and the menu repeats via a loop
    instead of tail recursion (which grew the call stack on every selection).
    """
    while True:
        # header for menu
        print(banner)
        # Build the prompt table; entry 0 is always "Exit".
        prompts = {0: ["Exit", None]}
        for op in options:
            prompts[len(prompts)] = op
        for key, value in prompts.items():
            print(key, '->', value[0])
        # get user choice
        choice = input("Type your choice> ")
        try:
            choice = int(choice)
        except ValueError:
            # not a number error
            print(f"Not a number: {choice}")
            continue
        if choice == 0:
            # stop
            return
        if choice not in prompts:
            print(f"Invalid choice: {choice}")
            continue
        action = prompts[choice][1]
        try:
            # try as function
            action()
        except TypeError:
            # Not callable: treat it as a playground-style script path.
            try:
                exec(open(action).read())
            except FileNotFoundError:
                print(f"File not found!: {action}")
            except TypeError:
                print(f"Not callable {action}")
def _patterns_menu():
    """Open the Patterns submenu."""
    menu("Patterns", patterns_menu)
def _numbers_menu():
    """Open the Numbers submenu."""
    menu("Numbers", numbers_menu)
def _functions_menu():
    """Open the Math Functions submenu."""
    menu("Math Functions", functions_menu)
def driver():
    """Show the top-level menu linking to every submenu."""
    menu("Main Menu", [
        ["Patterns", _patterns_menu],
        ["Numbers", _numbers_menu],
        ["Math Functions", _functions_menu],
    ])
# Script entry point: launch the interactive menu tree.
if __name__ == "__main__":
    driver()
| StarcoderdataPython |
22782 | <gh_stars>10-100
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script parses metrics from Cluster Autoscaler e2e tests.
'''
from __future__ import division
from __future__ import print_function
import argparse
import json
class CAMetric(object):
    """Aggregated duration metrics (in seconds) for one CA function."""

    def __init__(self, function_name):
        self.function_name = function_name
        # Sum of all duration samples, in seconds.
        self.sum = 0.0
        # Mean sample duration.  This was previously misnamed `average`
        # while print() and parse_metrics_file() both used `avg`, so calling
        # print() before parsing raised AttributeError.
        self.avg = 0.0
        # List of (upper_bound_seconds, cumulative_count) tuples.
        self.buckets = []
        self.count = 0
        # All collected samples were smaller than this value (seconds).
        self.upper_bound = 0.0

    def print(self):
        """Print a one-line summary followed by the raw histogram buckets."""
        print(self.function_name, '\t', self.sum, '\t', self.count, '\t', self.avg,
              '\t', self.upper_bound)
        print(self.buckets)
def print_summary(summary):
    """Print a header row, then one summary line per collected metric."""
    print('function_name\t sum\t count\t avg\t upper_bound')
    print('buckets')
    for entry in summary.values():
        entry.print()
def function_name(sample):
    """Return the CA function name recorded in a sample's metric labels."""
    labels = sample['metric']
    return labels['function']
def metric_value(sample):
    """Return the scalar value of a sample (second element of the value pair)."""
    timestamp_and_value = sample['value']
    return timestamp_and_value[1]
def upper_bound(buckets):
    '''
    Going from the rightmost bucket, find the first one that has some samples
    and return its upper bound.

    `buckets` is a list of (le, cumulative_count) tuples sorted by `le`.
    Returns None for an empty list (implicitly).
    '''
    # Use `range`, not `xrange`: xrange does not exist on Python 3, so this
    # function raised NameError there despite the __future__ imports at the
    # top of the file.
    for i in range(len(buckets) - 1, -1, -1):
        le, count = buckets[i]
        if i == 0:
            # Leftmost bucket: nothing to compare against, return its bound.
            return le
        le_prev, count_prev = buckets[i - 1]
        if count_prev < count:
            # Counts are cumulative, so a strictly smaller previous count
            # means this bucket actually received samples.
            return le
def parse_metrics_file(metrics_file):
    '''
    Return interesting metrics for all Cluster Autoscaler functions.
    Merics are stored in a map keyed by function name and are expressed in
    seconds. They include
    * sum of all samples
    * count of sumples
    * average value of samples
    * upper bound - all collected samples were smaller than this value
    * buckets - list of tuples (# of samples, bucket upper bound)
    '''
    summary = {}
    # NOTE: the parameter name is reused for the open file handle below.
    with open(metrics_file) as metrics_file:
        summary = {}
        metrics = json.load(metrics_file)
        ca_metrics = metrics['ClusterAutoscalerMetrics']
        # Pass 1: create one CAMetric per function and record the sample sum.
        total_sum = ca_metrics['cluster_autoscaler_function_duration_seconds_sum']
        for sample in total_sum:
            function = function_name(sample)
            summary[function] = CAMetric(function)
            summary[function].sum = float(metric_value(sample))
        # Pass 2: record counts and derive the average from sum/count.
        count = ca_metrics['cluster_autoscaler_function_duration_seconds_count']
        for sample in count:
            function = function_name(sample)
            summary[function].count = int(metric_value(sample))
            summary[function].avg = summary[function].sum / summary[function].count
        # Pass 3: collect histogram buckets as (le, cumulative_count) tuples.
        buckets = ca_metrics['cluster_autoscaler_function_duration_seconds_bucket']
        for sample in buckets:
            function = function_name(sample)
            summary[function].buckets.append(
                (float(sample['metric']['le']), int(metric_value(sample))))
        # Derive the tightest upper bound from each function's buckets.
        for value in summary.values():
            value.upper_bound = upper_bound(value.buckets)
    return summary
def main():
    """CLI entry point: parse the metrics file named on the command line."""
    parser = argparse.ArgumentParser(description='Parse metrics from Cluster Autoscaler e2e test')
    parser.add_argument('metrics_file', help='File to read metrics from')
    args = parser.parse_args()
    summary = parse_metrics_file(args.metrics_file)
    print_summary(summary)

if __name__ == '__main__':
    main()
| StarcoderdataPython |
5138198 | <reponame>shizhediao/SEDST3
def clean_replace(s, r, t, forward=True, backward=False):
    """Replace word-boundary-safe occurrences of `r` in `s` with `t`.

    A match preceded by a non-space character is rejected (and scanning
    stops).  With ``forward=True`` the replacement also consumes any
    alphanumeric characters directly following the match; with
    ``backward=True`` it extends leftwards over alphanumerics instead of
    rejecting.  Returns the rewritten string.
    """
    def clean_replace_single(s, r, t, forward, backward, sidx=0):
        # Find the next candidate at or after sidx; -1 signals "no more".
        idx = s[sidx:].find(r)
        if idx == -1:
            return s, -1
        # Convert the match offset to an absolute index.  This line used to
        # be commented out, so every occurrence after the first was spliced
        # at an offset relative to s[sidx:], corrupting the output.
        idx += sidx
        idx_r = idx + len(r)
        if backward:
            # Extend left over alphanumerics.  The old test `s[idx - 1]` was
            # truthy for every character, which walked idx all the way to 0.
            while idx > 0 and (s[idx - 1].isalpha() or s[idx - 1].isdigit()):
                idx -= 1
        elif idx > 0 and s[idx - 1] != ' ':
            # Mid-word match: leave the string untouched and stop searching.
            return s, -1
        if forward:
            while idx_r < len(s) and (s[idx_r].isalpha() or s[idx_r].isdigit()):
                idx_r += 1
        elif idx_r != len(s) and (s[idx_r].isalpha() or s[idx_r].isdigit()):
            return s, -1
        # Resume scanning right after the inserted text, measured in the NEW
        # string.  The old code returned an offset into the pre-replacement
        # string, which drifted whenever len(t) differed from the replaced
        # span's length.
        return s[:idx] + t + s[idx_r:], idx + len(t)

    sidx = 0
    while sidx != -1:
        s, sidx = clean_replace_single(s, r, t, forward, backward, sidx)
    return s
# Quick manual smoke test with a SEDST-style delexicalization example:
# replace the slot value 'conference' with the slot token 'event_SLOT'.
s = '<GO> okay , i have set a reminder for your conference with bos to be with a conference with bos . </s>'
r = 'conference'
t = 'event_SLOT'
# NOTE(review): these two flags are never passed to clean_replace below, so
# the call uses the defaults (forward=True, backward=False).
backward = False
forward = True
# NOTE(review): `re` shadows the name of the stdlib regex module; harmless
# here because `re` is not imported in this script, but a poor choice.
re = clean_replace(s, r, t)
print(re)
| StarcoderdataPython |
6454102 | # -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2012 <NAME>
# Import System Modules
import re
import sys
import socket
import logging
import traceback
from wsgiref.headers import Headers
from threading import Thread
from datetime import datetime
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from ssl import SSLError
except ImportError:
class SSLError(socket.error):
pass
# Import Package Modules
from . import IGNORE_ERRORS_ON_CLOSE, b, PY3K, NullHandler, IS_JYTHON
from .connection import Connection
# Define Constants
# Case-insensitive matcher for the percent-encoded slash; used to protect
# %2F sequences while unquoting request paths.
re_SLASH = re.compile('%2F', re.IGNORECASE)
# Verbose (re.X) pattern for an HTTP/1.x request line; whitespace inside the
# pattern is insignificant except the escaped "\ " single spaces.
re_REQUEST_LINE = re.compile(r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) # Request Method
\ # (single space)
(
(?P<scheme>[^:/]+) # Scheme
(://) #
(?P<host>[^/]+) # Host
)? #
(?P<path>(\*|/[^ \?]*)) # Path
(\? (?P<query_string>[^ ]*))? # Query String
\ # (single space)
(?P<protocol>HTTPS?/1\.[01]) # Protocol
$
""", re.X)
# Access-log line template, filled via %-formatting with a dict.
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
# Minimal raw HTTP response template used by Worker.send_response().
# NOTE(review): an HTTP/1.x response requires an empty line between the
# headers and the body; confirm the original literal contains one (blank
# lines may have been lost when this file was transcribed).
RESPONSE = '''\
%s %s
Content-Length: %i
Content-Type: %s
%s
'''
# Jython's regex engine is slow, so request lines are parsed procedurally
# there; this set backs the method check in _read_request_line_jython().
if IS_JYTHON:
    HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT'])
class Worker(Thread):
    """The Worker class is a base class responsible for receiving connections
    and (a subclass) will run an application to process the connection."""

    def __init__(self,
                 app_info,
                 active_queue,
                 monitor_queue,
                 *args,
                 **kwargs):
        Thread.__init__(self, *args, **kwargs)
        # Instance Variables
        self.app_info = app_info
        # Queue of incoming client connections to serve.
        self.active_queue = active_queue
        # Idle keep-alive connections are handed back here for monitoring.
        self.monitor_queue = monitor_queue
        self.size = 0
        self.status = "200 OK"
        self.closeConnection = True
        self.request_line = ""
        self.protocol = 'HTTP/1.1'
        # Request Log
        self.req_log = logging.getLogger('Rocket.Requests')
        self.req_log.addHandler(NullHandler())
        # Error Log
        self.err_log = logging.getLogger('Rocket.Errors.'+self.getName())
        self.err_log.addHandler(NullHandler())

    def _handleError(self, typ, val, tb):
        """Classify an exception raised while serving a request.

        Returns True when the serve loop should break without closing the
        connection here (timeout hands the socket to the monitor queue;
        BadRequest already sent a 400), False otherwise (the caller's
        close/continue logic takes over based on self.closeConnection).
        """
        if typ == SSLError:
            # SSL timeouts surface as SSLError; normalize to SocketTimeout.
            if 'timed out' in val.args[0]:
                typ = SocketTimeout
        if typ == SocketTimeout:
            if __debug__:
                self.err_log.debug('Socket timed out')
            # Hand the idle connection to the monitor thread for keep-alive.
            self.monitor_queue.put(self.conn)
            return True
        if typ == SocketClosed:
            self.closeConnection = True
            if __debug__:
                self.err_log.debug('Client closed socket')
            return False
        if typ == BadRequest:
            self.closeConnection = True
            if __debug__:
                self.err_log.debug('Client sent a bad request')
            return True
        if typ == socket.error:
            self.closeConnection = True
            if val.args[0] in IGNORE_ERRORS_ON_CLOSE:
                if __debug__:
                    self.err_log.debug('Ignorable socket Error received...'
                                       'closing connection.')
                return False
            else:
                self.status = "999 Utter Server Failure"
                tb_fmt = traceback.format_exception(typ, val, tb)
                self.err_log.error('Unhandled Error when serving '
                                   'connection:\n' + '\n'.join(tb_fmt))
                return False
        # Any other exception type: log it and answer with a 500.
        self.closeConnection = True
        tb_fmt = traceback.format_exception(typ, val, tb)
        self.err_log.error('\n'.join(tb_fmt))
        self.send_response('500 Server Error')
        return False

    def run(self):
        """Thread main loop: pull connections and serve requests on them."""
        if __debug__:
            self.err_log.debug('Entering main loop.')
        # Enter thread main loop
        while True:
            conn = self.active_queue.get()
            if not conn:
                # A non-client is a signal to die
                if __debug__:
                    self.err_log.debug('Received a death threat.')
                return conn
            if isinstance(conn, tuple):
                # Lazily wrap raw (socket, address, ...) tuples.
                conn = Connection(*conn)
            self.conn = conn
            if conn.ssl != conn.secure:
                # Plain HTTP arrived on the HTTPS listener; reject it.
                self.err_log.info('Received HTTP connection on HTTPS port.')
                self.send_response('400 Bad Request')
                self.closeConnection = True
                conn.close()
                continue
            else:
                if __debug__:
                    self.err_log.debug('Received a connection.')
                self.closeConnection = False
            # Enter connection serve loop
            while True:
                if __debug__:
                    self.err_log.debug('Serving a request')
                try:
                    self.run_app(conn)
                except:
                    exc = sys.exc_info()
                    handled = self._handleError(*exc)
                    if handled:
                        break
                finally:
                    # Log the request even when an exception was raised.
                    if self.request_line:
                        log_info = dict(client_ip = conn.client_addr,
                                        time = datetime.now().strftime('%c'),
                                        status = self.status.split(' ')[0],
                                        size = self.size,
                                        request_line = self.request_line)
                        self.req_log.info(LOG_LINE % log_info)
                if self.closeConnection:
                    try:
                        conn.close()
                    except:
                        self.err_log.error(str(traceback.format_exc()))
                    break

    def run_app(self, conn):
        # Must be overridden with a method that reads the request from the
        # socket and sends a response.
        self.closeConnection = True
        raise NotImplementedError('Overload this method!')

    def send_response(self, status):
        """Send a minimal plain-text response carrying `status` (e.g. '400 Bad Request')."""
        stat_msg = status.split(' ', 1)[1]
        msg = RESPONSE % (self.protocol,
                          status,
                          len(stat_msg),
                          'text/plain',
                          stat_msg)
        try:
            self.conn.sendall(b(msg))
        except socket.error:
            self.closeConnection = True
            msg = 'Tried to send "%s" to client but received socket error'
            self.err_log.error(msg % status)

    def read_request_line(self, sock_file):
        """Read and parse the HTTP request line; return its parts as a dict."""
        self.request_line = ''
        try:
            # Grab the request line
            d = sock_file.readline()
            if PY3K:
                d = d.decode('ISO-8859-1')
            if d == '\r\n':
                # Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
                if __debug__:
                    self.err_log.debug('Client sent newline')
                d = sock_file.readline()
                if PY3K:
                    d = d.decode('ISO-8859-1')
        except socket.timeout:
            raise SocketTimeout("Socket timed out before request.")
        d = d.strip()
        if not d:
            if __debug__:
                self.err_log.debug('Client did not send a recognizable request.')
            raise SocketClosed('Client closed socket.')
        self.request_line = d
        # NOTE: I've replaced the traditional method of procedurally breaking
        # apart the request line with a (rather unsightly) regular expression.
        # However, Java's regexp support sucks so bad that it actually takes
        # longer in Jython to process the regexp than procedurally. So I've
        # left the old code here for Jython's sake...for now.
        if IS_JYTHON:
            return self._read_request_line_jython(d)
        match = re_REQUEST_LINE.match(d)
        if not match:
            self.send_response('400 Bad Request')
            raise BadRequest
        req = match.groupdict()
        for k,v in req.items():
            if not v:
                req[k] = ""
            if k == 'path':
                # Unquote the path piecewise so literal %2F sequences survive.
                req['path'] = r'%2F'.join([unquote(x) for x in re_SLASH.split(v)])
        self.protocol = req['protocol']
        return req

    def _read_request_line_jython(self, d):
        """Procedural request-line parser used on Jython (see NOTE above)."""
        d = d.strip()
        try:
            method, uri, proto = d.split(' ')
            if not proto.startswith('HTTP') or \
               proto[-3:] not in ('1.0', '1.1') or \
               method not in HTTP_METHODS:
                self.send_response('400 Bad Request')
                raise BadRequest
        except ValueError:
            self.send_response('400 Bad Request')
            raise BadRequest
        req = dict(method=method, protocol = proto)
        scheme = ''
        host = ''
        if uri == '*' or uri.startswith('/'):
            # Origin-form (or asterisk-form) request target.
            path = uri
        elif '://' in uri:
            # Absolute-form request target: split scheme/host from the path.
            scheme, rest = uri.split('://')
            host, path = rest.split('/', 1)
            path = '/' + path
        else:
            self.send_response('400 Bad Request')
            raise BadRequest
        query_string = ''
        if '?' in path:
            path, query_string = path.split('?', 1)
        # Unquote piecewise so literal %2F sequences survive (see above).
        path = r'%2F'.join([unquote(x) for x in re_SLASH.split(path)])
        req.update(path=path,
                   query_string=query_string,
                   scheme=scheme.lower(),
                   host=host)
        return req

    def read_headers(self, sock_file):
        """Read header lines up to the blank line; return {NAME: value}.

        Header names are upper-cased with '-' replaced by '_' (CGI style);
        continuation lines are folded into the previous value with commas.
        """
        try:
            headers = dict()
            l = sock_file.readline()
            lname = None
            lval = None
            while True:
                if PY3K:
                    try:
                        l = str(l, 'ISO-8859-1')
                    except UnicodeDecodeError:
                        self.err_log.warning('Client sent invalid header: ' + repr(l))
                if l == '\r\n':
                    # Blank line terminates the header section.
                    break
                if l[0] in ' \t' and lname:
                    # Some headers take more than one line
                    lval += ',' + l.strip()
                else:
                    # HTTP header values are latin-1 encoded
                    l = l.split(':', 1)
                    # HTTP header names are us-ascii encoded
                    lname = l[0].strip().upper().replace('-', '_')
                    lval = l[-1].strip()
                headers[str(lname)] = str(lval)
                l = sock_file.readline()
        except socket.timeout:
            raise SocketTimeout("Socket timed out before request.")
        return headers
class SocketTimeout(Exception):
    "Exception for when a socket times out between requests (keep-alive idle)."
    pass
class BadRequest(Exception):
    "Exception for when a client sends an incomprehensible request (a 400 was already sent)."
    pass
class SocketClosed(Exception):
    "Exception for when a socket is closed by the client."
    pass
class ChunkedReader(object):
    """File-like reader that decodes an HTTP "chunked" transfer-encoded body.

    Wraps a binary stream positioned at the first chunk-size line and exposes
    read()/readline()/readlines() over the de-chunked payload bytes.
    """

    def __init__(self, sock_file):
        # Underlying binary stream.
        self.stream = sock_file
        # Bytes still unread from the current chunk.
        self.chunk_size = 0

    def _read_header(self):
        """Read the next chunk-size line; return it as an int (0 at end/garbage)."""
        chunk_len = b""
        try:
            # Skip blank lines (the CRLF terminating the previous chunk).
            # The old code compared a str "" against the bytes readline()
            # returns, which never matched on Python 3, so every chunk after
            # the first failed to parse.
            while not chunk_len:
                chunk_len = self.stream.readline().strip()
            return int(chunk_len, 16)
        except ValueError:
            return 0

    def read(self, size):
        """Return up to `size` de-chunked bytes (fewer at end of stream)."""
        data = b''
        chunk_size = self.chunk_size
        while size:
            if not chunk_size:
                chunk_size = self._read_header()
            if size < chunk_size:
                # Request satisfied mid-chunk; remember the leftover bytes.
                data += self.stream.read(size)
                chunk_size -= size
                break
            else:
                if not chunk_size:
                    # Terminating zero-size chunk: end of body.
                    break
                data += self.stream.read(chunk_size)
                size -= chunk_size
                chunk_size = 0
        self.chunk_size = chunk_size
        return data

    def readline(self):
        """Return one line (including the trailing b'\\n' when present)."""
        data = b''
        c = self.read(1)
        while c and c != b'\n':
            data += c
            c = self.read(1)
        data += c
        return data

    def readlines(self):
        """Yield every remaining line.

        The previous version yielded only the first line and then stopped.
        """
        line = self.readline()
        while line:
            yield line
            line = self.readline()
def get_method(method):
    """Return the Worker subclass for `method` ('wsgi' or 'fs', case-insensitive).

    Raises KeyError for an unknown method name.  Imports are deferred so the
    worker modules are only loaded when actually requested.
    """
    from .methods.wsgi import WSGIWorker
    from .methods.fs import FileSystemWorker
    methods = dict(wsgi=WSGIWorker,
                   fs=FileSystemWorker)
    return methods[method.lower()]
| StarcoderdataPython |
4992960 | <filename>corditea/__init__.py
"""Public re-exports for the corditea package."""
from .crop_array import CropArray
from .gamma_augment import GammaAugment
from .impulse_noise_augment import ImpulseNoiseAugment
from .intensity_crop import IntensityCrop
from .lambda_filter import LambdaFilter
from .lambda_source import LambdaSource
from .multiply import Multiply
from .random_location_with_integral_mask import RandomLocationWithIntegralMask
from .reject_efficiently import RejectEfficiently
# The ".py" suffix is not part of a module name: "from .tf_run.py import Run"
# asks for a submodule named "py" inside a "tf_run" package and fails with
# ModuleNotFoundError at import time (same for ".sum.py").
from .tf_run import Run
from .sum import Sum
176484 | <gh_stars>1-10
import pickle
def pickle_dump(file_path, file):
    """Serialize *file* (any picklable object) to *file_path*."""
    with open(file_path, 'wb') as handle:
        pickle.dump(file, handle)
def pickle_load(file_path):
    """Deserialize and return the object stored at *file_path*.

    Returns None when the file is empty or truncated (EOFError).  Any other
    error — e.g. a missing file — propagates to the caller, as before.
    """
    try:
        with open(file_path, 'rb') as handle:
            return pickle.load(handle)
    except EOFError:
        return None
| StarcoderdataPython |
5049076 | <gh_stars>1-10
from bs4 import BeautifulSoup as bs
import datetime
def extract(filename):
    """Parse a saved timetable HTML page into per-day event lists.

    Returns (daily_events, day2abrev, day2key, code2name):
    - daily_events: day name -> list of merged ((begin, end), code, place)
    - day2abrev: day name -> iCal-style two-letter abbreviation
    - day2key: day name -> column index (0..5)
    - code2name: course code -> course name
    """
    with open(filename) as file:
        soup = bs(file, features="html.parser")
    # Timetable cells: one <textarea class="campo"> per (time slot, weekday).
    classes_html = soup.find_all("textarea", {"class": "campo"})
    time_html = soup.find_all("td", {"class": "horario"})
    infos = soup.find_all("td", {"class": "linha_tabela"})
    keys = [x for x in range(0, 6)]
    days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
    weekdays = ["MO", "TU", "WE", "TH", "FR", "SA", "SU"]
    key2day = dict(zip(keys, days))
    day2key = dict(zip(days, keys))
    day2abrev = dict(zip(days, weekdays))
    daily_events = {k: [] for k in days}
    class_code = {k: [] for k in keys}
    class_time = [time.contents[0] for time in time_html]
    # NOTE(review): `aux` is assigned but never used.
    aux = len(class_time)
    time_counter = 0
    # Cells are laid out row-major: idx % 6 is the weekday column, and every
    # 6 cells advance to the next time slot.
    for idx, text in enumerate(classes_html):
        if text.contents != []:
            # Cell content is "code\nplace"; assumes exactly one newline —
            # TODO confirm against the source page format.
            code, place = text.contents[0].split('\n')
            class_code[idx % 6].append((class_time[time_counter], code, place))
        if idx % 6 == 5:
            time_counter += 1
    # Course info table: rows of 4 cells, where cell 0 is the course code
    # and cell 2 the course name.
    code2name = {}
    end = len(infos)
    for idx in range(0, end, 4):
        code = infos[idx].contents[0]
        name = infos[idx + 2].contents[0]
        code2name[code] = name
    # Merge consecutive slots of the same course into single intervals.
    for key, classes in class_code.items():
        daily_events[key2day[key]] = extract_periods(classes)
    return daily_events, day2abrev, day2key, code2name
def build_interval(begin, end, code, place):
    """Return ((begin, end + 50 minutes), code, place) for one class block.

    `end` is an "HH:MM" slot start; each slot lasts 50 minutes, so the real
    finish time is 50 minutes past it (formatted by str(timedelta)).
    """
    hours, minutes = (int(part) for part in end.split(":"))
    finish = datetime.timedelta(hours=hours, minutes=minutes + 50)
    return ((begin, str(finish)), code, place)
def extract_periods(classes):
    """Merge consecutive same-course slots into intervals.

    `classes` is a list of (time, code, place) tuples in slot order; runs of
    identical codes are collapsed into one interval via build_interval().
    NOTE(review): a single-element list returns [] because the loop starts at
    index 1 — confirm whether one-slot classes should produce an interval.
    """
    l = len(classes)
    intervals = []
    # begin_aux/end_aux track the first and last index of the current run.
    begin_aux = 0
    end_aux = 0
    for x in range(1, l):
        if classes[x - 1][1] == classes[x][1]:
            # Same course as the previous slot: extend the current run.
            end_aux = x
        else:
            # Course changed: flush the finished run and start a new one.
            intervals.append(build_interval(classes[begin_aux][0], classes[end_aux][0], classes[begin_aux][1], classes[begin_aux][2]))
            begin_aux = x
            end_aux = x
        # Last iteration: flush whatever run is still open.
        if x == l - 1:
            end_aux = x
            intervals.append(build_interval(classes[begin_aux][0], classes[end_aux][0], classes[begin_aux][1], classes[begin_aux][2]))
    return intervals
def extractor(filename):
    """Public entry point: parse *filename* and return the timetable data."""
    return extract(filename)
| StarcoderdataPython |
3331402 | <reponame>g-jami/COMPAS-II-FS2021<gh_stars>10-100
import math
from compas.datastructures import Mesh
from compas.geometry import Point, Vector, Frame, Circle, Plane, Line
from compas.geometry import Cylinder, Box
from compas.geometry import Transformation, Translation
from compas.utilities import pairwise
from compas.robots import RobotModel
from compas.robots import Joint
from compas_rhino.artists import FrameArtist, LineArtist, CylinderArtist
from compas_rhino.artists import RobotModelArtist
# ==============================================================================
# Model
# ==============================================================================
# Build a simple 5-DOF "robot hand" model from scratch: base, shoulder, arm,
# forearm, wrist and hand links connected by revolute/continuous joints, then
# pose it and draw it in Rhino.
robot = RobotModel('Robot-Hand')
# ==============================================================================
# Links
# ==============================================================================
# Each link's visual geometry is a simple solid (cylinder or box) converted
# to a mesh; `u` controls the cylinder tessellation.
cylinder = Cylinder(Circle(Plane(Point(0, 0, 0), Vector(0, 0, 1)), 0.5), 0.02)
mesh = Mesh.from_shape(cylinder, u=32)
base = robot.add_link(
    'base',
    visual_meshes=[mesh],
    visual_color=(0.1, 0.1, 0.1)
)
cylinder = Cylinder(Circle(Plane(Point(0, 0, 0), Vector(0, 0, 1)), 0.2), 0.5)
cylinder.transform(Translation.from_vector([0, 0, 0.25]))
mesh = Mesh.from_shape(cylinder, u=24)
shoulder = robot.add_link(
    'shoulder',
    visual_meshes=[mesh],
    visual_color=(0, 0, 1.0)
)
cylinder = Cylinder(Circle(Plane(Point(0, 0, 0), Vector(0, 0, 1)), 0.08), 1.0)
cylinder.transform(Translation.from_vector([0, 0, 0.5]))
mesh = Mesh.from_shape(cylinder)
arm = robot.add_link(
    'arm',
    visual_meshes=[mesh],
    visual_color=(0.0, 1.0, 1.0)
)
cylinder = Cylinder(Circle(Plane(Point(0, 0, 0), Vector(0, 0, 1)), 0.08), 1.0)
cylinder.transform(Translation.from_vector([0, 0, 0.5]))
mesh = Mesh.from_shape(cylinder)
forearm = robot.add_link(
    'forearm',
    visual_meshes=[mesh],
    visual_color=(0.0, 1.0, 1.0)
)
cylinder = Cylinder(Circle(Plane(Point(0, 0, 0), Vector(0, 0, 1)), 0.11), 0.01)
mesh = Mesh.from_shape(cylinder, u=32)
wrist = robot.add_link(
    'wrist',
    visual_meshes=[mesh],
    visual_color=(0.1, 0.1, 0.1)
)
box = Box.from_width_height_depth(0.04, 0.3, 0.22)
box.transform(Translation.from_vector([0, 0, 0.15]))
mesh = Mesh.from_shape(box)
hand = robot.add_link(
    'hand',
    visual_meshes=[mesh],
    visual_color=(0, 0, 1.0)
)
# ==============================================================================
# Joints
# ==============================================================================
# Revolute joints with +/- 90 degree limits chain the links; the wrist-hand
# joint is continuous (unlimited rotation about Z).
base_joint = robot.add_joint(
    'base-shoulder',
    Joint.REVOLUTE,
    base, shoulder,
    origin=Frame(Point(0, 0, 0), Vector(1, 0, 0), Vector(0, 1, 0)),
    axis=Vector(0, 0, 1),
    limit=(-0.5 * math.pi, +0.5 * math.pi)
)
shoulder_joint = robot.add_joint(
    'shoulder-arm',
    Joint.REVOLUTE,
    shoulder, arm,
    origin=Frame(Point(0, 0, 0.5), Vector(1, 0, 0), Vector(0, 1, 0)),
    axis=Vector(0, 1, 0),
    limit=(-0.5 * math.pi, +0.5 * math.pi)
)
elbow_joint = robot.add_joint(
    'arm-forearm',
    Joint.REVOLUTE,
    arm, forearm,
    origin=Frame(Point(0, 0, 1.0), Vector(1, 0, 0), Vector(0, 1, 0)),
    axis=Vector(0, 1, 0),
    limit=(-0.5 * math.pi, +0.5 * math.pi)
)
wrist_joint = robot.add_joint(
    'forearm-wrist',
    Joint.REVOLUTE,
    forearm, wrist,
    origin=Frame(Point(0, 0, 1.0), Vector(1, 0, 0), Vector(0, 1, 0)),
    axis=Vector(0, 1, 0),
    limit=(-0.5 * math.pi, +0.5 * math.pi)
)
hand_joint = robot.add_joint(
    'wrist-hand',
    Joint.CONTINUOUS,
    wrist, hand,
    origin=Frame(Point(0, 0, 0.0), Vector(1, 0, 0), Vector(0, 1, 0)),
    axis=Vector(0, 0, 1)
)
# ==============================================================================
# State
# ==============================================================================
# Joint values (radians) are zipped with the configurable joint names, then
# forward kinematics yields one transformation per joint.
names = robot.get_configurable_joint_names()
values = [+0.25 * math.pi, -0.25 * math.pi, +0.5 * math.pi, 0, +0.25 * math.pi]
state = dict(zip(names, values))
transformations = robot.compute_transformations(state)
# Collect each joint's posed origin frame and axis (kept for inspection;
# not used by the drawing calls below).
frames = []
axes = []
for joint in robot.iter_joints():
    frame = joint.origin.transformed(transformations[joint.name])
    frame.name = joint.name
    frames.append(frame)
    axis = joint.axis.transformed(transformations[joint.name])
    axis.name = joint.name
    axes.append(axis)
# Draw the posed robot in Rhino on layer "Robot" (collision geometry off).
artist = RobotModelArtist(robot, layer="Robot")
artist.clear_layer()
artist.update(state, collision=False)
artist.draw()
| StarcoderdataPython |
1874824 | <filename>rundeck-libext/cache/py-winrm-plugin-2.0.13/winrm-filecopier.py
try:
import os; os.environ['PATH']
except:
import os
os.environ.setdefault('PATH', '')
import winrm
import argparse
import sys
import base64
import time
import common
import logging
import ntpath
import xml.etree.ElementTree as ET
import colored_formatter
from colored_formatter import ColoredFormatter
import kerberosauth
#checking and importing dependencies
ISPY3 = sys.version_info[0] == 3
WINRM_INSTALLED = False
URLLIB_INSTALLED = False
KRB_INSTALLED = False
HAS_NTLM = False
HAS_CREDSSP = False
HAS_PEXPECT = False
if ISPY3:
from inspect import getfullargspec as getargspec
else:
from inspect import getargspec
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
URLLIB_INSTALLED = True
except ImportError as e:
URLLIB_INSTALLED = False
try:
import winrm
WINRM_INSTALLED = True
except ImportError as e:
WINRM_INSTALLED = False
try:
from requests_kerberos import HTTPKerberosAuth, REQUIRED, OPTIONAL, DISABLED
KRB_INSTALLED = True
except ImportError:
KRB_INSTALLED = False
try:
from requests_ntlm import HttpNtlmAuth
HAS_NTLM = True
except ImportError as ie:
HAS_NTLM = False
try:
from requests_credssp import HttpCredSSPAuth
HAS_CREDSSP = True
except ImportError as ie:
HAS_CREDSSP = False
try:
import pexpect
if hasattr(pexpect, 'spawn'):
argspec = getargspec(pexpect.spawn.__init__)
if 'echo' in argspec.args:
HAS_PEXPECT = True
except ImportError as e:
HAS_PEXPECT = False
# Root-logger setup: DEBUG when Rundeck requests it, ERROR otherwise.
# The same five lines previously appeared twice back to back (and the initial
# 'INFO' assignment was always overwritten); the duplicate and the dead
# assignment are removed — the resulting log_level is identical.
if os.environ.get('RD_JOB_LOGLEVEL') == 'DEBUG':
    log_level = 'DEBUG'
else:
    log_level = 'ERROR'
console = logging.StreamHandler()
console.setFormatter(ColoredFormatter(colored_formatter.format()))
# Rundeck reads plugin output from stdout, so log there instead of stderr.
console.stream = sys.stdout
log = logging.getLogger()
log.addHandler(console)
log.setLevel(log_level)
def _clean_error_msg(self, msg):
    """converts a Powershell CLIXML message to a more human readable string

    NOTE(review): this looks like a method lifted out of a class — it still
    takes `self` and calls `self._strip_namespace`, which is not defined in
    this module, so the XML branch will raise (and be swallowed by the broad
    except below) unless `self` supplies that method — confirm the caller.
    """
    # TODO prepare unit test, beautify code
    # if the msg does not start with this, return it as is
    if type(msg) == bytes and msg.startswith(b"#< CLIXML\r\n"):
        # for proper xml, we need to remove the CLIXML part
        # (the first line)
        msg_xml = msg[11:]
        try:
            # remove the namespaces from the xml for easier processing
            msg_xml = self._strip_namespace(msg_xml)
            root = ET.fromstring(msg_xml)
            # the S node is the error message, find all S nodes
            nodes = root.findall("./S")
            new_msg = ""
            for s in nodes:
                # append error msg string to result, also
                # the hex chars represent CRLF so we replace with newline
                print(s.text)
                new_msg += s.text.replace("_x000D__x000A_", "\n")
        except Exception as e:
            # if any of the above fails, the msg was not true xml
            # print a warning and return the orignal string
            # TODO do not print, raise user defined error instead
            print("Warning: there was a problem converting the Powershell"
                  " error message: %s" % (e))
        else:
            # if new_msg was populated, that's our error message
            # otherwise the original error message will be used
            if len(new_msg):
                # remove leading and trailing whitespace while we are here
                msg = new_msg.strip()
    return msg
# Silence urllib3's self-signed-certificate warnings (WinRM endpoints
# commonly use untrusted certs).
requests.packages.urllib3.disable_warnings()
# Second logging pass keyed on the plugin's own debug flag; this reconfigures
# the root logger via basicConfig and rebinds `log` to a module logger.
if os.environ.get('RD_CONFIG_DEBUG') == 'true':
    log_level = 'DEBUG'
else:
    log_level = 'ERROR'
logging.basicConfig(
    stream=sys.stdout,
    level=getattr(logging, log_level),
    format='%(levelname)s: %(name)s: %(message)s'
)
log = logging.getLogger('winrm-filecopier')
class RemoteCommandError(Exception):
    """Raised when a command executed on the remote host fails."""

    _TEMPLATE = ('Remote execution of "{}" failed with exit code {}. '
                 'STDOUT: {}. STDERR: {}.')

    def __init__(self, command, return_code, std_out='', std_err=''):
        super(RemoteCommandError, self).__init__(
            self._TEMPLATE.format(command, return_code, std_out, std_err))
class WinRmError(RemoteCommandError):
    """Remote command failure that occurred over the WinRM transport."""
    pass
class CopyFiles(object):
    """Upload local files to a remote Windows host over WinRM.

    Files are streamed as base64-encoded chunks appended to the remote
    path via Powershell's ``add-content``, so no extra transfer protocol
    (SMB, SSH) is required.
    """
    def __init__(self, session):
        # session: an authenticated winrm.Session used for all remote calls.
        self.session=session
    def winrm_upload(self,
                     remote_path,
                     remote_filename,
                     local_path,
                     step=2048,
                     quiet=True):
        """Copy ``local_path`` to ``remote_path\\remote_filename`` in
        ``step``-byte chunks, printing progress unless ``quiet`` is set.

        Raises WinRmError when a remote Powershell invocation fails for a
        reason other than transient file locking.
        """
        # Join directory and file name, tolerating either separator style.
        if remote_path.endswith('/') or remote_path.endswith('\\'):
            full_path = remote_path + remote_filename
        else:
            full_path = remote_path + "\\" + remote_filename
        # NOTE(review): typo -- "coping" should read "copying".
        print("coping file %s to %s" % (local_path, full_path))
        # Create the destination directory on the remote host if missing.
        self.session.run_ps('if (!(Test-Path {0})) {{ New-Item -ItemType directory -Path {0} }}'.format(remote_path))
        size = os.stat(local_path).st_size
        with open(local_path, 'rb') as f:
            for i in range(0, size, step):
                # Append one base64-decoded chunk to the remote file.
                script = (
                    'add-content -value '
                    '$([System.Convert]::FromBase64String("{}")) '
                    '-encoding byte -path {}'.format(
                        base64.b64encode(f.read(step)).decode(),
                        full_path
                    )
                )
                while True:
                    result = self.session.run_ps(script)
                    code=result.status_code
                    stdout=result.std_out
                    stderr=result.std_err
                    if code == 0:
                        break
                    elif code == 1 and 'used by another process' in stderr:
                        # The remote file is transiently locked; retry shortly.
                        # NOTE(review): pywinrm's std_err may be bytes under
                        # Python 3, in which case this `in` test raises
                        # TypeError -- confirm against the pywinrm in use.
                        time.sleep(0.1)
                    else:
                        raise WinRmError(script, code, stdout, stderr)
                if not quiet:
                    # Clamp the final partial chunk to the real file size.
                    transferred = i + step
                    if transferred > size:
                        transferred = size
                    # NOTE(review): progress_blocks is computed but never used.
                    progress_blocks = transferred * 30 // size
                    percentage_string = str(
                        (100 * transferred) // size
                    ) + ' %'
                    # Right-align the percentage in a 10-character field.
                    percentage_string = (
                        ' ' * (10 - len(percentage_string)) +
                        percentage_string
                    )
                    print(percentage_string)
                    sys.stdout.flush()
# Command-line interface: target hostname plus local source path and
# remote destination path.
parser = argparse.ArgumentParser(description='Run Bolt command.')
parser.add_argument('hostname', help='the hostname')
parser.add_argument('source', help='Source File')
parser.add_argument('destination', help='Destination File')
args = parser.parse_args()
# it is necessary to avoid the debug error
print(args.destination)
# NOTE(review): "<PASSWORD>" is a redacted secret placeholder left by a
# scrubbing tool; this line is not valid Python as written -- restore the
# original default assignment.
password=<PASSWORD>
# Connection defaults; each can be overridden below through the RD_CONFIG_*
# environment variables that Rundeck injects.
authentication = "basic"
transport = "http"
port = "5985"
nossl = False
debug = False
diabletls12 = False
kinit = None
krb5config = None
krbdelegation = False
forceTicket = False
if "RD_CONFIG_AUTHTYPE" in os.environ:
    authentication = os.getenv("RD_CONFIG_AUTHTYPE")
if "RD_CONFIG_WINRMTRANSPORT" in os.environ:
    transport = os.getenv("RD_CONFIG_WINRMTRANSPORT")
if "RD_CONFIG_WINRMPORT" in os.environ:
    port = os.getenv("RD_CONFIG_WINRMPORT")
if "RD_CONFIG_NOSSL" in os.environ:
    if os.getenv("RD_CONFIG_NOSSL") == "true":
        nossl = True
    else:
        nossl = False
if "RD_CONFIG_DISABLETLS12" in os.environ:
    if os.getenv("RD_CONFIG_DISABLETLS12") == "true":
        diabletls12 = True
    else:
        diabletls12 = False
if "RD_CONFIG_CERTPATH" in os.environ:
    certpath = os.getenv("RD_CONFIG_CERTPATH")
# Username resolution order: job option > node attribute > project config.
if "RD_OPTION_USERNAME" in os.environ and os.getenv("RD_OPTION_USERNAME"):
    #take user from job
    username = os.getenv("RD_OPTION_USERNAME").strip('\'')
else:
    # take user from node
    if "RD_NODE_USERNAME" in os.environ and os.getenv("RD_NODE_USERNAME"):
        username = os.getenv("RD_NODE_USERNAME").strip('\'')
    else:
        # take user from project
        if "RD_CONFIG_USERNAME" in os.environ and os.getenv("RD_CONFIG_USERNAME"):
            username = os.getenv("RD_CONFIG_USERNAME").strip('\'')
# Password resolution order: job option > key-storage path.
if "RD_OPTION_WINRMPASSWORD" in os.environ and os.getenv("RD_OPTION_WINRMPASSWORD"):
    #take password from job
    password = os.getenv("RD_OPTION_WINRMPASSWORD").strip('\'')
else:
    if "RD_CONFIG_PASSWORD_STORAGE_PATH" in os.environ:
        password = os.getenv("RD_CONFIG_PASSWORD_STORAGE_PATH")
# quiet controls the progress printout in CopyFiles.winrm_upload.
quiet = True
if "RD_CONFIG_DEBUG" in os.environ:
    quiet = False
if "RD_CONFIG_KRB5CONFIG" in os.environ:
    krb5config = os.getenv("RD_CONFIG_KRB5CONFIG")
if "RD_CONFIG_KINIT" in os.environ:
    kinit = os.getenv("RD_CONFIG_KINIT")
if "RD_CONFIG_KRBDELEGATION" in os.environ:
    if os.getenv("RD_CONFIG_KRBDELEGATION") == "true":
        krbdelegation = True
    else:
        krbdelegation = False
# Build the WinRM endpoint URL and the pywinrm session keyword arguments.
endpoint = transport+'://'+args.hostname+':'+port
arguments = {}
arguments["transport"] = authentication
if(nossl == True):
    arguments["server_cert_validation"] = "ignore"
else:
    if(transport=="https"):
        arguments["server_cert_validation"] = "validate"
        # NOTE(review): certpath is only assigned when RD_CONFIG_CERTPATH is
        # set; this line raises NameError otherwise -- confirm intent.
        arguments["ca_trust_path"] = certpath
        arguments["credssp_disable_tlsv1_2"] = diabletls12
# Fail fast when the optional client libraries required by the selected
# authentication mechanism are missing.
if not URLLIB_INSTALLED:
    log.error("request and urllib3 not installed, try: pip install requests && pip install urllib3")
    sys.exit(1)
if not WINRM_INSTALLED:
    log.error("winrm not installed, try: pip install pywinrm")
    sys.exit(1)
if authentication == "kerberos" and not KRB_INSTALLED:
    log.error("Kerberos not installed, try: pip install pywinrm[kerberos]")
    sys.exit(1)
if authentication == "kerberos" and not HAS_PEXPECT:
    log.error("pexpect not installed, try: pip install pexpect")
    sys.exit(1)
if authentication == "credssp" and not HAS_CREDSSP:
    log.error("CredSSP not installed, try: pip install pywinrm[credssp]")
    sys.exit(1)
if authentication == "ntlm" and not HAS_NTLM:
    log.error("NTLM not installed, try: pip install requests_ntlm")
    sys.exit(1)
# For Kerberos, obtain a ticket via kinit before opening the session.
if authentication == "kerberos":
    k5bConfig = kerberosauth.KerberosAuth(krb5config=krb5config, log=log, kinit_command=kinit,username=username, password=password)
    k5bConfig.get_ticket()
    arguments["kerberos_delegation"] = krbdelegation
session = winrm.Session(target=endpoint,
                        auth=(username, password),
                        **arguments)
# Patch pywinrm's Session so CLIXML error payloads are rendered readable.
winrm.Session._clean_error_msg = _clean_error_msg
copy = CopyFiles(session)
# Derive the remote directory and target file name from the destination
# argument; fall back to the source's basename when none is given.
destination = args.destination
filename = ntpath.basename(args.destination)
if filename is None:
    # NOTE(review): ntpath.basename never returns None (it returns ''), so
    # this branch is dead and the else below always runs -- confirm intent.
    filename = os.path.basename(args.source)
    if filename in args.destination:
        destination = destination.replace(filename, '')
else:
    isFile = common.check_is_file(args.destination)
    if isFile:
        filename = common.get_file(args.destination)
        destination = destination.replace(filename, '')
    else:
        filename = os.path.basename(args.source)
if not os.path.isdir(args.source):
    copy.winrm_upload(remote_path=destination,
                      remote_filename=filename,
                      local_path=args.source,
                      quiet=quiet)
else:
    # NOTE(review): Logger.warn is a deprecated alias of Logger.warning.
    log.warn("The source is a directory, skipping copy")
| StarcoderdataPython |
6450894 | # Question 8
# Read three numbers and, when all three are equal, print three times
# their sum (per the exercise prompt).
num1 = float(input("Enter 1st number: "))
num2 = float(input("Enter 2nd number: "))
num3 = float(input("Enter 3rd number: "))
sums = num1 + num2 + num3
if num1 == num2 == num3:
    print(sums * 3)
# NOTE(review): nothing is printed when the numbers differ; the exercise
# presumably expects the plain sum in that case -- confirm and add an else.
| StarcoderdataPython |
1628101 | <filename>heroku3/core.py
# -*- coding: utf-8 -*-
"""
heroku3.core
~~~~~~~~~~~
This module provides the base entrypoint for heroku3.py.
"""
from .api import Heroku
import requests
def from_key(api_key, session=None, **kwargs):
    """Returns an authenticated Heroku instance, via API Key."""
    if not session:
        session = requests.session()
        # An explicit API key must be the only credential in play: with
        # trust_env enabled, requests would silently fall back to netrc
        # authentication.
        session.trust_env = False
    client = Heroku(session=session, **kwargs)
    client.authenticate(api_key)
    return client
| StarcoderdataPython |
3566227 | <reponame>PeloriTech/Osiris
import requests
from osiris_server.settings import TENSORFLOW_SERVE_URL
class TensorflowServeGateway:
    """Minimal REST client for a TensorFlow Serving model endpoint."""

    # Path template of TF Serving's REST predict API, keyed by model name.
    url_predict = '/v1/models/{}:predict'

    @staticmethod
    def predict(jpeg_bytes, model) -> int:
        """POST a base64-encoded JPEG to the named model and return the
        predicted class value from the first instance."""
        endpoint = TENSORFLOW_SERVE_URL + TensorflowServeGateway.url_predict.format(model)
        payload = '{"instances" : [{"b64": "%s"}]}' % jpeg_bytes
        response = requests.post(endpoint, data=payload)
        return response.json()['predictions'][0]['classes']
1919182 | <gh_stars>10-100
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from ..station import StationDb
def set_background(stations="black"):
    """Display the map background (earth and stations)

    Args:
        stations (str): matplotlib color used for the station markers when
            non empty. Pass a falsy value (e.g. ``False``) to disable the
            station marks entirely.
    """
    texture = Path(__file__).parent.parent / "static/earth.png"
    plt.imshow(plt.imread(str(texture)), extent=[-180, 180, -90, 90])
    plt.xlim([-180, 180])
    plt.ylim([-90, 90])
    plt.grid(True, linestyle=":", alpha=0.4)
    plt.xticks(range(-180, 181, 30))
    plt.yticks(range(-90, 91, 30))

    if not stations:
        return

    for station in StationDb.list().values():
        # latlonalt is radians; drop altitude and convert to degrees.
        lat, lon = np.degrees(station.latlonalt[:-1])
        plt.plot([lon], [lat], "+", color=stations)
        plt.text(lon + 1, lat + 1, station.abbr, color=stations)
| StarcoderdataPython |
8018222 | <filename>pywikibot/family.py
# -*- coding: utf-8 -*-
#
# (C) Pywikipedia bot team, 2004-2013
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: 68f136e606fe94a1cbfa585bdd9cfdfb5b51f1b2 $'
import logging
import re
import urllib
import config2 as config
import pywikibot
logger = logging.getLogger("pywiki.wiki.family")
# Parent class for all wiki families
class Family(object):
def __init__(self):
if not hasattr(self, 'name'):
self.name = None
# For interwiki sorting order see
# http://meta.wikimedia.org/wiki/Interwiki_sorting_order
# The sorting order by language name from meta
# MediaWiki:Interwiki_config-sorting_order-native-languagename
self.alphabetic = [
'ace', 'kbd', 'af', 'ak', 'als', 'am', 'ang', 'ab', 'ar', 'an',
'arc', 'roa-rup', 'frp', 'as', 'ast', 'gn', 'av', 'ay', 'az', 'bm',
'bn', 'bjn', 'zh-min-nan', 'nan', 'map-bms', 'ba', 'be', 'be-x-old',
'bh', 'bcl', 'bi', 'bg', 'bar', 'bo', 'bs', 'br', 'bxr', 'ca', 'cv',
'ceb', 'cs', 'ch', 'cbk-zam', 'ny', 'sn', 'tum', 'cho', 'co', 'cy',
'da', 'dk', 'pdc', 'de', 'dv', 'nv', 'dsb', 'dz', 'mh', 'et', 'el',
'eml', 'en', 'myv', 'es', 'eo', 'ext', 'eu', 'ee', 'fa', 'hif',
'fo', 'fr', 'fy', 'ff', 'fur', 'ga', 'gv', 'gag', 'gd', 'gl', 'gan',
'ki', 'glk', 'gu', 'got', 'hak', 'xal', 'ko', 'ha', 'haw', 'hy',
'hi', 'ho', 'hsb', 'hr', 'io', 'ig', 'ilo', 'bpy', 'id', 'ia', 'ie',
'iu', 'ik', 'os', 'xh', 'zu', 'is', 'it', 'he', 'jv', 'kl', 'kn',
'kr', 'pam', 'krc', 'ka', 'ks', 'csb', 'kk', 'kw', 'rw', 'rn', 'sw',
'kv', 'kg', 'ht', 'ku', 'kj', 'ky', 'mrj', 'lad', 'lbe', 'lez',
'lo', 'ltg', 'la', 'lv', 'lb', 'lt', 'lij', 'li', 'ln', 'jbo', 'lg',
'lmo', 'hu', 'mk', 'mg', 'ml', 'mt', 'mi', 'mr', 'xmf', 'arz',
'mzn', 'ms', 'min', 'cdo', 'mwl', 'mdf', 'mo', 'mn', 'mus', 'my',
'nah', 'na', 'fj', 'nl', 'nds-nl', 'cr', 'ne', 'new', 'ja', 'nap',
'ce', 'frr', 'pih', 'no', 'nb', 'nn', 'nrm', 'nov', 'ii', 'oc',
'mhr', 'or', 'om', 'ng', 'hz', 'uz', 'pa', 'pi', 'pfl', 'pag',
'pnb', 'pap', 'ps', 'koi', 'km', 'pcd', 'pms', 'tpi', 'nds', 'pl',
'tokipona', 'tp', 'pnt', 'pt', 'aa', 'kaa', 'crh', 'ty', 'ksh',
'ro', 'rmy', 'rm', 'qu', 'rue', 'ru', 'sah', 'se', 'sm', 'sa', 'sg',
'sc', 'sco', 'stq', 'st', 'nso', 'tn', 'sq', 'scn', 'si', 'simple',
'sd', 'ss', 'sk', 'sl', 'cu', 'szl', 'so', 'ckb', 'srn', 'sr', 'sh',
'su', 'fi', 'sv', 'tl', 'ta', 'shi', 'kab', 'roa-tara', 'tt', 'te',
'tet', 'th', 'ti', 'tg', 'to', 'chr', 'chy', 've', 'tr', 'tk', 'tw',
'udm', 'bug', 'uk', 'ur', 'ug', 'za', 'vec', 'vep', 'vi', 'vo',
'fiu-vro', 'wa', 'zh-classical', 'vls', 'war', 'wo', 'wuu', 'ts',
'yi', 'yo', 'zh-yue', 'diq', 'zea', 'bat-smg', 'zh', 'zh-tw',
'zh-cn',
]
# The revised sorting order by first word from meta
# MediaWiki:Interwiki_config-sorting_order-native-languagename-firstword
self.alphabetic_revised = [
'ace', 'kbd', 'af', 'ak', 'als', 'am', 'ang', 'ab', 'ar', 'an',
'arc', 'roa-rup', 'frp', 'as', 'ast', 'gn', 'av', 'ay', 'az', 'bjn',
'id', 'ms', 'bm', 'bn', 'zh-min-nan', 'nan', 'map-bms', 'jv', 'su',
'ba', 'min', 'be', 'be-x-old', 'bh', 'bcl', 'bi', 'bar', 'bo', 'bs',
'br', 'bug', 'bg', 'bxr', 'ca', 'ceb', 'cv', 'cs', 'ch', 'cbk-zam',
'ny', 'sn', 'tum', 'cho', 'co', 'cy', 'da', 'dk', 'pdc', 'de', 'dv',
'nv', 'dsb', 'na', 'dz', 'mh', 'et', 'el', 'eml', 'en', 'myv', 'es',
'eo', 'ext', 'eu', 'ee', 'fa', 'hif', 'fo', 'fr', 'fy', 'ff', 'fur',
'ga', 'gv', 'sm', 'gag', 'gd', 'gl', 'gan', 'ki', 'glk', 'gu',
'got', 'hak', 'xal', 'ko', 'ha', 'haw', 'hy', 'hi', 'ho', 'hsb',
'hr', 'io', 'ig', 'ilo', 'bpy', 'ia', 'ie', 'iu', 'ik', 'os', 'xh',
'zu', 'is', 'it', 'he', 'kl', 'kn', 'kr', 'pam', 'ka', 'ks', 'csb',
'kk', 'kw', 'rw', 'ky', 'rn', 'mrj', 'sw', 'kv', 'kg', 'ht', 'ku',
'kj', 'lad', 'lbe', 'lez', 'lo', 'la', 'ltg', 'lv', 'to', 'lb',
'lt', 'lij', 'li', 'ln', 'jbo', 'lg', 'lmo', 'hu', 'mk', 'mg', 'ml',
'krc', 'mt', 'mi', 'mr', 'xmf', 'arz', 'mzn', 'cdo', 'mwl', 'koi',
'mdf', 'mo', 'mn', 'mus', 'my', 'nah', 'fj', 'nl', 'nds-nl', 'cr',
'ne', 'new', 'ja', 'nap', 'ce', 'frr', 'pih', 'no', 'nb', 'nn',
'nrm', 'nov', 'ii', 'oc', 'mhr', 'or', 'om', 'ng', 'hz', 'uz', 'pa',
'pi', 'pfl', 'pag', 'pnb', 'pap', 'ps', 'km', 'pcd', 'pms', 'nds',
'pl', 'pnt', 'pt', 'aa', 'kaa', 'crh', 'ty', 'ksh', 'ro', 'rmy',
'rm', 'qu', 'ru', 'rue', 'sah', 'se', 'sa', 'sg', 'sc', 'sco',
'stq', 'st', 'nso', 'tn', 'sq', 'scn', 'si', 'simple', 'sd', 'ss',
'sk', 'sl', 'cu', 'szl', 'so', 'ckb', 'srn', 'sr', 'sh', 'fi', 'sv',
'tl', 'ta', 'shi', 'kab', 'roa-tara', 'tt', 'te', 'tet', 'th', 'vi',
'ti', 'tg', 'tpi', 'tokipona', 'tp', 'chr', 'chy', 've', 'tr', 'tk',
'tw', 'udm', 'uk', 'ur', 'ug', 'za', 'vec', 'vep', 'vo', 'fiu-vro',
'wa', 'zh-classical', 'vls', 'war', 'wo', 'wuu', 'ts', 'yi', 'yo',
'zh-yue', 'diq', 'zea', 'bat-smg', 'zh', 'zh-tw', 'zh-cn',
]
# Order for fy: alphabetical by code, but y counts as i
def fycomp(x, y):
x = x.replace("y", "i") + x.count("y") * "!"
y = y.replace("y", "i") + y.count("y") * "!"
return cmp(x, y)
self.fyinterwiki = self.alphabetic[:]
self.fyinterwiki.remove('nb')
self.fyinterwiki.sort(fycomp)
self.langs = {}
self.namespacesWithSubpage = [2] + range(1, 16, 2)
# letters that can follow a wikilink and are regarded as part of
# this link
# This depends on the linktrail setting in LanguageXx.php and on
# [[MediaWiki:Linktrail]].
# Note: this is a regular expression.
self.linktrails = {
'_default': u'[a-z]*',
'ca': u'[a-zàèéíòóúç·ïü]*',
'cs': u'[a-záčďéěíňóřšťúůýž]*',
'de': u'[a-zäöüß]*',
'da': u'[a-zæøå]*',
'es': u'[a-záéíóúñ]*',
'fa': u'[a-zابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة]*',
'fi': u'[a-zäö]*',
'fr': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'frr': u'[a-zäöüßåāđē]*',
'he': u'[a-zא-ת]*',
'hu': u'[a-záéíóúöüőűÁÉÍÓÚÖÜŐŰ]*',
'it': u'[a-zàéèíîìóòúù]*',
'ka': u'[a-zაბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰ“»]*',
'kk': u'[a-zäçéğıïñöşüýʺʹа-яёәғіқңөұүһٴابپتجحدرزسشعفقكلمنڭەوۇۋۆىيچھ“»]*',
'ksh': u'[a-zäöüėëijßəğåůæœç]*',
'mk': u'[a-zабвгдѓежзѕијклљмнњопрстќуфхцчџш]*',
'nl': u'[a-zäöüïëéèàë]*',
'pl': u'[a-zęóąśłżźćńĘÓĄŚŁŻŹĆŃ]*',
'pt': u'[a-záâàãéêíóôõúüç]*',
'ro': u'[a-zăâîşţșțĂÂÎŞŢȘȚ]*',
'ru': u'[a-zа-я]*',
'sk': u'[a-záäčďéíľĺňóôŕšťúýž]*',
}
# Wikimedia wikis all use "bodyContent" as the id of the <div>
# element that contains the actual page content; change this for
# wikis that use something else (e.g., mozilla family)
self.content_id = "bodyContent"
# A dictionary where keys are family codes that can be used in
# inter-family interwiki links. Do not use it directly but
# get_known_families() instead.
# TODO: replace this with API interwikimap call
self.known_families = {
'abbenormal': 'abbenormal',
'acronym': 'acronym',
'advisory': 'advisory',
'advogato': 'advogato',
'aew': 'aew',
'airwarfare': 'airwarfare',
'aiwiki': 'aiwiki',
'allwiki': 'allwiki',
'appropedia': 'appropedia',
'aquariumwiki': 'aquariumwiki',
'arxiv': 'arxiv',
'aspienetwiki': 'aspienetwiki',
'atmwiki': 'atmwiki',
'b': 'wikibooks',
'battlestarwiki': 'battlestarwiki',
'bemi': 'bemi',
'benefitswiki': 'benefitswiki',
'betawiki': 'betawiki',
'betawikiversity': 'betawikiversity',
'biblewiki': 'biblewiki',
'bluwiki': 'bluwiki',
'botwiki': 'botwiki',
'boxrec': 'boxrec',
'brickwiki': 'brickwiki',
'bridgeswiki': 'bridgeswiki',
'bugzilla': 'bugzilla',
'buzztard': 'buzztard',
'bytesmiths': 'bytesmiths',
'c2': 'c2',
'c2find': 'c2find',
'cache': 'cache',
'canwiki': 'canwiki',
'canyonwiki': 'canyonwiki',
'Ĉej': 'Ĉej',
'cellwiki': 'cellwiki',
'centralwikia': 'centralwikia',
'chapter': 'chapter',
'chej': 'chej',
'choralwiki': 'choralwiki',
'ciscavate': 'ciscavate',
'citizendium': 'citizendium',
'ckwiss': 'ckwiss',
'closed-zh-tw': 'closed-zh-tw',
'cndbname': 'cndbname',
'cndbtitle': 'cndbtitle',
'colab': 'colab',
'comcom': 'comcom',
'comixpedia': 'comixpedia',
'commons': 'commons',
'communityscheme': 'communityscheme',
'comune': 'comune',
'consciousness': 'consciousness',
'corpknowpedia': 'corpknowpedia',
'crazyhacks': 'crazyhacks',
'creatureswiki': 'creatureswiki',
'cxej': 'cxej',
'dawiki': 'dawiki',
'dbdump': 'dbdump',
'dcc': 'dcc',
'dcdatabase': 'dcdatabase',
'dcma': 'dcma',
'dejanews': 'dejanews',
'delicious': 'delicious',
'demokraatia': 'demokraatia',
'devmo': 'devmo',
'dict': 'dict',
'dictionary': 'dictionary',
'disinfopedia': 'disinfopedia',
'distributedproofreaders': 'distributedproofreaders',
'distributedproofreadersca': 'distributedproofreadersca',
'dk': 'dk',
'dmoz': 'dmoz',
'dmozs': 'dmozs',
'docbook': 'docbook',
## 'doi': 'doi',
'doom_wiki': 'doom_wiki',
'download': 'download',
'drae': 'drae',
'dreamhost': 'dreamhost',
'drumcorpswiki': 'drumcorpswiki',
'dwjwiki': 'dwjwiki',
'eĉei': 'eĉei',
'echei': 'echei',
'ecoreality': 'ecoreality',
'ecxei': 'ecxei',
'efnetceewiki': 'efnetceewiki',
'efnetcppwiki': 'efnetcppwiki',
'efnetpythonwiki': 'efnetpythonwiki',
'efnetxmlwiki': 'efnetxmlwiki',
'elibre': 'elibre',
'emacswiki': 'emacswiki',
'energiewiki': 'energiewiki',
'eokulturcentro': 'eokulturcentro',
'epo': 'epo',
'ethnologue': 'ethnologue',
'evowiki': 'evowiki',
'exotica': 'exotica',
'fanimutationwiki': 'fanimutationwiki',
'finalempire': 'finalempire',
'finalfantasy': 'finalfantasy',
'finnix': 'finnix',
'flickruser': 'flickruser',
'floralwiki': 'floralwiki',
'flyerwiki-de': 'flyerwiki-de',
'foldoc': 'foldoc',
'forthfreak': 'forthfreak',
'foundation': 'foundation',
'foxwiki': 'foxwiki',
'freebio': 'freebio',
'freebsdman': 'freebsdman',
'freeculturewiki': 'freeculturewiki',
'freedomdefined': 'freedomdefined',
'freefeel': 'freefeel',
'freekiwiki': 'freekiwiki',
'ganfyd': 'ganfyd',
'gausswiki': 'gausswiki',
'gentoo-wiki': 'gentoo',
'genwiki': 'genwiki',
'globalvoices': 'globalvoices',
'glossarwiki': 'glossarwiki',
'glossarywiki': 'glossarywiki',
'golem': 'golem',
'google': 'google',
'googledefine': 'googledefine',
'googlegroups': 'googlegroups',
'gotamac': 'gotamac',
'greatlakeswiki': 'greatlakeswiki',
'guildwiki': 'guildwiki',
'gutenberg': 'gutenberg',
'gutenbergwiki': 'gutenbergwiki',
'h2wiki': 'h2wiki',
'hammondwiki': 'hammondwiki',
'heroeswiki': 'heroeswiki',
'herzkinderwiki': 'herzkinderwiki',
'hkmule': 'hkmule',
'holshamtraders': 'holshamtraders',
'hrfwiki': 'hrfwiki',
'hrwiki': 'hrwiki',
'humancell': 'humancell',
'hupwiki': 'hupwiki',
'imdbcharacter': 'imdbcharacter',
'imdbcompany': 'imdbcompany',
'imdbname': 'imdbname',
'imdbtitle': 'imdbtitle',
'incubator': 'incubator',
'infoanarchy': 'infoanarchy',
'infosecpedia': 'infosecpedia',
'infosphere': 'infosphere',
'iso639-3': 'iso639-3',
'iuridictum': 'iuridictum',
'jameshoward': 'jameshoward',
'javanet': 'javanet',
'javapedia': 'javapedia',
'jefo': 'jefo',
'jiniwiki': 'jiniwiki',
'jspwiki': 'jspwiki',
'jstor': 'jstor',
'kamelo': 'kamelo',
'karlsruhe': 'karlsruhe',
'kerimwiki': 'kerimwiki',
'kinowiki': 'kinowiki',
'kmwiki': 'kmwiki',
'kontuwiki': 'kontuwiki',
'koslarwiki': 'koslarwiki',
'kpopwiki': 'kpopwiki',
'linguistlist': 'linguistlist',
'linuxwiki': 'linuxwiki',
'linuxwikide': 'linuxwikide',
'liswiki': 'liswiki',
'literateprograms': 'literateprograms',
'livepedia': 'livepedia',
'lojban': 'lojban',
'lostpedia': 'lostpedia',
'lqwiki': 'lqwiki',
'lugkr': 'lugkr',
'luxo': 'luxo',
'lyricwiki': 'lyricwiki',
'm': 'meta',
'm-w': 'm-w',
'mail': 'mail',
'mailarchive': 'mailarchive',
'mariowiki': 'mariowiki',
'marveldatabase': 'marveldatabase',
'meatball': 'meatball',
'mediazilla': 'mediazilla',
'memoryalpha': 'memoryalpha',
'meta': 'meta',
'metawiki': 'metawiki',
'metawikipedia': 'metawikipedia',
'mineralienatlas': 'mineralienatlas',
'moinmoin': 'moinmoin',
'monstropedia': 'monstropedia',
'mosapedia': 'mosapedia',
'mozcom': 'mozcom',
'mozillawiki': 'mozillawiki',
'mozillazinekb': 'mozillazinekb',
'musicbrainz': 'musicbrainz',
'mw': 'mw',
'mwod': 'mwod',
'mwot': 'mwot',
'n': 'wikinews',
'netvillage': 'netvillage',
'nkcells': 'nkcells',
'nomcom': 'nomcom',
'nosmoke': 'nosmoke',
'nost': 'nost',
'oeis': 'oeis',
'oldwikisource': 'oldwikisource',
'olpc': 'olpc',
'onelook': 'onelook',
'openfacts': 'openfacts',
'openstreetmap': 'openstreetmap',
'openwetware': 'openwetware',
'openwiki': 'openwiki',
'opera7wiki': 'opera7wiki',
'organicdesign': 'organicdesign',
'orgpatterns': 'orgpatterns',
'orthodoxwiki': 'orthodoxwiki',
'osi reference model': 'osi reference model',
'otrs': 'otrs',
'otrswiki': 'otrswiki',
'ourmedia': 'ourmedia',
'paganwiki': 'paganwiki',
'panawiki': 'panawiki',
'pangalacticorg': 'pangalacticorg',
'patwiki': 'patwiki',
'perlconfwiki': 'perlconfwiki',
'perlnet': 'perlnet',
'personaltelco': 'personaltelco',
'phpwiki': 'phpwiki',
'phwiki': 'phwiki',
'planetmath': 'planetmath',
'pmeg': 'pmeg',
'pmwiki': 'pmwiki',
'psycle': 'psycle',
'purlnet': 'purlnet',
'pythoninfo': 'pythoninfo',
'pythonwiki': 'pythonwiki',
'pywiki': 'pywiki',
'q': 'wikiquote',
'qcwiki': 'qcwiki',
'quality': 'quality',
'qwiki': 'qwiki',
'r3000': 'r3000',
'raec': 'raec',
'rakwiki': 'rakwiki',
'reuterswiki': 'reuterswiki',
'rev': 'rev',
'revo': 'revo',
'rfc': 'rfc',
'rheinneckar': 'rheinneckar',
'robowiki': 'robowiki',
'rowiki': 'rowiki',
's': 'wikisource',
's23wiki': 's23wiki',
'scholar': 'scholar',
'schoolswp': 'schoolswp',
'scores': 'scores',
'scoutwiki': 'scoutwiki',
'scramble': 'scramble',
'seapig': 'seapig',
'seattlewiki': 'seattlewiki',
'seattlewireless': 'seattlewireless',
'senseislibrary': 'senseislibrary',
'silcode': 'silcode',
'slashdot': 'slashdot',
'slwiki': 'slwiki',
'smikipedia': 'smikipedia',
'sourceforge': 'sourceforge',
'spcom': 'spcom',
'species': 'species',
'squeak': 'squeak',
'stable': 'stable',
'strategywiki': 'strategywiki',
'sulutil': 'sulutil',
'susning': 'susning',
'svgwiki': 'svgwiki',
'svn': 'svn',
'swinbrain': 'swinbrain',
'swingwiki': 'swingwiki',
'swtrain': 'swtrain',
'tabwiki': 'tabwiki',
'takipedia': 'takipedia',
'tavi': 'tavi',
'tclerswiki': 'tclerswiki',
'technorati': 'technorati',
'tejo': 'tejo',
'tesoltaiwan': 'tesoltaiwan',
'testwiki': 'testwiki',
'thelemapedia': 'thelemapedia',
'theopedia': 'theopedia',
'theppn': 'theppn',
'thinkwiki': 'thinkwiki',
'tibiawiki': 'tibiawiki',
'ticket': 'ticket',
'tmbw': 'tmbw',
'tmnet': 'tmnet',
'tmwiki': 'tmwiki',
'tokyonights': 'tokyonights',
'tools': 'tools',
'translatewiki': 'translatewiki',
'trash!italia': 'trash!italia',
'tswiki': 'tswiki',
'turismo': 'turismo',
'tviv': 'tviv',
'tvtropes': 'tvtropes',
'twiki': 'twiki',
'twistedwiki': 'twistedwiki',
'tyvawiki': 'tyvawiki',
'uncyclopedia': 'uncyclopedia',
'unreal': 'unreal',
'urbandict': 'urbandict',
'usej': 'usej',
'usemod': 'usemod',
'v': 'wikiversity',
'valuewiki': 'valuewiki',
'veropedia': 'veropedia',
'vinismo': 'vinismo',
'vkol': 'vkol',
'vlos': 'vlos',
'voipinfo': 'voipinfo',
'w': 'wikipedia',
'warpedview': 'warpedview',
'webdevwikinl': 'webdevwikinl',
'webisodes': 'webisodes',
'webseitzwiki': 'webseitzwiki',
'wg': 'wg',
'wiki': 'wiki',
'wikia': 'wikia',
'wikianso': 'wikianso',
'wikiasite': 'wikiasite',
'wikible': 'wikible',
'wikibooks': 'wikibooks',
'wikichat': 'wikichat',
'wikichristian': 'wikichristian',
'wikicities': 'wikicities',
'wikicity': 'wikicity',
'wikif1': 'wikif1',
'wikifur': 'wikifur',
'wikihow': 'wikihow',
'wikiindex': 'wikiindex',
'wikilemon': 'wikilemon',
'wikilivres': 'wikilivres',
'wikimac-de': 'wikimac-de',
'wikimac-fr': 'wikimac-fr',
'wikimedia': 'wikimedia',
'wikinews': 'wikinews',
'wikinfo': 'wikinfo',
'wikinurse': 'wikinurse',
'wikinvest': 'wikinvest',
'wikipaltz': 'wikipaltz',
'wikipedia': 'wikipedia',
'wikipediawikipedia': 'wikipediawikipedia',
'wikiquote': 'wikiquote',
'wikireason': 'wikireason',
'wikischool': 'wikischool',
'wikisophia': 'wikisophia',
'wikisource': 'wikisource',
'wikispecies': 'wikispecies',
'wikispot': 'wikispot',
'wikiti': 'wikiti',
'wikitravel': 'wikitravel',
'wikitree': 'wikitree',
'wikiversity': 'wikiversity',
'wikiwikiweb': 'wikiwikiweb',
'wikt': 'wiktionary',
'wiktionary': 'wiktionary',
'wipipedia': 'wipipedia',
'wlug': 'wlug',
'wm2005': 'wm2005',
'wm2006': 'wm2006',
'wm2007': 'wm2007',
'wm2008': 'wm2008',
'wm2009': 'wm2009',
'wm2010': 'wm2010',
'wmania': 'wmania',
'wmcz': 'wmcz',
'wmf': 'wmf',
'wmrs': 'wmrs',
'wmse': 'wmse',
'wookieepedia': 'wookieepedia',
'world66': 'world66',
'wowwiki': 'wowwiki',
'wqy': 'wqy',
'wurmpedia': 'wurmpedia',
'wznan': 'wznan',
'xboxic': 'xboxic',
'zh-cfr': 'zh-cfr',
'zrhwiki': 'zrhwiki',
'zum': 'zum',
'zwiki': 'zwiki',
'zzz wiki': 'zzz wiki',
}
# A list of category redirect template names in different languages
# Note: It *is* necessary to list template redirects here
self.category_redirect_templates = {
'_default': []
}
# A list of languages that use hard (instead of soft) category redirects
self.use_hard_category_redirects = []
# A list of disambiguation template names in different languages
self.disambiguationTemplates = {
'_default': []
}
# A list of projects that share cross-project sessions.
self.cross_projects = []
# A list with the name for cross-project cookies.
# default for wikimedia centralAuth extensions.
self.cross_projects_cookies = ['centralauth_Session',
'centralauth_Token',
'centralauth_User']
self.cross_projects_cookie_username = 'centralauth_User'
# A list with the name in the cross-language flag permissions
self.cross_allowed = []
# A list with the name of the category containing disambiguation
# pages for the various languages. Only one category per language,
# and without the namespace, so add things like:
# 'en': "Disambiguation"
self.disambcatname = {}
# On most wikis page names must start with a capital letter, but some
# languages don't use this.
self.nocapitalize = []
# attop is a list of languages that prefer to have the interwiki
# links at the top of the page.
self.interwiki_attop = []
# on_one_line is a list of languages that want the interwiki links
# one-after-another on a single line
self.interwiki_on_one_line = []
# String used as separator between interwiki links and the text
self.interwiki_text_separator = config.line_separator * 2
# Similar for category
self.category_attop = []
# on_one_line is a list of languages that want the category links
# one-after-another on a single line
self.category_on_one_line = []
# String used as separator between category links and the text
self.category_text_separator = config.line_separator * 2
# When both at the bottom should categories come after interwikilinks?
self.categories_last = []
# Which languages have a special order for putting interlanguage
# links, and what order is it? If a language is not in
# interwiki_putfirst, alphabetical order on language code is used.
# For languages that are in interwiki_putfirst, interwiki_putfirst
# is checked first, and languages are put in the order given there.
# All other languages are put after those, in code-alphabetical
# order.
self.interwiki_putfirst = {}
# Languages in interwiki_putfirst_doubled should have a number plus
# a list of languages. If there are at least the number of interwiki
# links, all languages in the list should be placed at the front as
# well as in the normal list.
self.interwiki_putfirst_doubled = {} # THIS APPEARS TO BE UNUSED!
# Some families, e. g. commons and meta, are not multilingual and
# forward interlanguage links to another family (wikipedia).
# These families can set this variable to the name of the target
# family.
self.interwiki_forward = None
# Some families, e. g. wikipedia, receive forwarded interlanguage
# links from other families, e. g. incubator, commons, or meta.
# These families can set this variable to the names of their source
# families.
self.interwiki_forwarded_from = {}
# Which language codes no longer exist and by which language code
# should they be replaced. If for example the language with code xx:
# now should get code yy:, add {'xx':'yy'} to obsolete. If all
# links to language xx: should be removed, add {'xx': None}.
self.obsolete = {}
# Language codes of the largest wikis. They should be roughly sorted
# by size.
self.languages_by_size = []
# Some languages belong to a group where the possibility is high that
# equivalent articles have identical titles among the group.
self.language_groups = {
# languages using the arabic script (incomplete)
'arab': [
'ar', 'arz', 'ps', 'sd', 'ur', 'bjn', 'ckb',
# languages using multiple scripts, including arabic
'kk', 'ku', 'tt', 'ug', 'pnb'
],
# languages that use chinese symbols
'chinese': [
'wuu', 'zh', 'zh-classical', 'zh-yue', 'gan', 'ii',
# languages using multiple/mixed scripts, including chinese
'ja', 'za'
],
# languages that use the cyrillic alphabet
'cyril': [
'ab', 'av', 'ba', 'be', 'be-x-old', 'bg', 'bxr', 'ce', 'cu',
'cv', 'kbd', 'koi', 'kv', 'ky', 'mk', 'lbe', 'mdf', 'mn', 'mo',
'myv', 'mhr', 'mrj', 'os', 'ru', 'rue', 'sah', 'tg', 'tk',
'udm', 'uk', 'xal',
# languages using multiple scripts, including cyrillic
'ha', 'kk', 'sh', 'sr', 'tt'
],
# languages that use a greek script
'grec': [
'el', 'grc', 'pnt'
# languages using multiple scripts, including greek
],
# languages that use the latin alphabet
'latin': [
'aa', 'ace', 'af', 'ak', 'als', 'an', 'ang', 'ast', 'ay', 'bar',
'bat-smg', 'bcl', 'bi', 'bm', 'br', 'bs', 'ca', 'cbk-zam',
'cdo', 'ceb', 'ch', 'cho', 'chy', 'co', 'crh', 'cs', 'csb',
'cy', 'da', 'de', 'diq', 'dsb', 'ee', 'eml', 'en', 'eo', 'es',
'et', 'eu', 'ext', 'ff', 'fi', 'fiu-vro', 'fj', 'fo', 'fr',
'frp', 'frr', 'fur', 'fy', 'ga', 'gag', 'gd', 'gl', 'gn', 'gv',
'hak', 'haw', 'hif', 'ho', 'hr', 'hsb', 'ht', 'hu', 'hz', 'ia',
'id', 'ie', 'ig', 'ik', 'ilo', 'io', 'is', 'it', 'jbo', 'jv',
'kaa', 'kab', 'kg', 'ki', 'kj', 'kl', 'kr', 'ksh', 'kw', 'la',
'lad', 'lb', 'lg', 'li', 'lij', 'lmo', 'ln', 'lt', 'ltg', 'lv',
'map-bms', 'mg', 'mh', 'mi', 'ms', 'mt', 'mus', 'mwl', 'na',
'nah', 'nap', 'nds', 'nds-nl', 'ng', 'nl', 'nn', 'no', 'nov',
'nrm', 'nv', 'ny', 'oc', 'om', 'pag', 'pam', 'pap', 'pcd',
'pdc', 'pfl', 'pih', 'pl', 'pms', 'pt', 'qu', 'rm', 'rn', 'ro',
'roa-rup', 'roa-tara', 'rw', 'sc', 'scn', 'sco', 'se', 'sg',
'simple', 'sk', 'sl', 'sm', 'sn', 'so', 'sq', 'srn', 'ss', 'st',
'stq', 'su', 'sv', 'sw', 'szl', 'tet', 'tl', 'tn', 'to', 'tpi',
'tr', 'ts', 'tum', 'tw', 'ty', 'uz', 've', 'vec', 'vi', 'vls',
'vo', 'wa', 'war', 'wo', 'xh', 'yo', 'zea', 'zh-min-nan', 'zu',
# languages using multiple scripts, including latin
'az', 'chr', 'ckb', 'ha', 'iu', 'kk', 'ku', 'rmy', 'sh', 'sr',
'tt', 'ug', 'za'
],
# Scandinavian languages
'scand': [
'da', 'fo', 'is', 'nb', 'nn', 'no', 'sv'
],
}
# LDAP domain if your wiki uses LDAP authentication,
# http://www.mediawiki.org/wiki/Extension:LDAP_Authentication
self.ldapDomain = ()
# Allows crossnamespace interwiki linking.
# Lists the possible crossnamespaces combinations
# keys are originating NS
# values are dicts where:
# keys are the originating langcode, or _default
# values are dicts where:
# keys are the languages that can be linked to from the lang+ns, or
# '_default'; values are a list of namespace numbers
self.crossnamespace = {}
##
## Examples :
## Allowing linking to pt' 102 NS from any other lang' 0 NS is
# self.crossnamespace[0] = {
# '_default': { 'pt': [102]}
# }
## While allowing linking from pt' 102 NS to any other lang' = NS is
# self.crossnamespace[102] = {
# 'pt': { '_default': [0]}
# }
@property
def iwkeys(self):
if self.interwiki_forward:
return pywikibot.Family(self.interwiki_forward).langs.keys()
return self.langs.keys()
def _addlang(self, code, location, namespaces={}):
"""Add a new language to the langs and namespaces of the family.
This is supposed to be called in the constructor of the family.
"""
self.langs[code] = location
## for num, val in namespaces.items():
## self.namespaces[num][code] = val
    def get_known_families(self, site):
        """Return the mapping of interwiki prefixes to family names.

        ``site`` is unused here; subclasses may return per-site maps.
        """
        return self.known_families
def linktrail(self, code, fallback='_default'):
if code in self.linktrails:
return self.linktrails[code]
elif fallback:
return self.linktrails[fallback]
else:
raise KeyError(
"ERROR: linktrail in language %(language_code)s unknown"
% {'language_code': code})
def category_redirects(self, code, fallback="_default"):
if code in self.category_redirect_templates:
return self.category_redirect_templates[code]
elif fallback:
return self.category_redirect_templates[fallback]
else:
raise KeyError(
"ERROR: title for category redirect template in language '%s' unknown"
% code)
def disambig(self, code, fallback='_default'):
if code in self.disambiguationTemplates:
return self.disambiguationTemplates[code]
elif fallback:
return self.disambiguationTemplates[fallback]
else:
raise KeyError(
"ERROR: title for disambig template in language %s unknown"
% code)
# Methods
def protocol(self, code):
"""
Can be overridden to return 'https'. Other protocols are not supported.
"""
return 'http'
def hostname(self, code):
"""The hostname to use for standard http connections."""
return self.langs[code]
def ssl_hostname(self, code):
"""The hostname to use for SSL connections."""
return "secure.wikimedia.org"
def scriptpath(self, code):
"""The prefix used to locate scripts on this wiki.
This is the value displayed when you enter {{SCRIPTPATH}} on a
wiki page (often displayed at [[Help:Variables]] if the wiki has
copied the master help page correctly).
The default value is the one used on Wikimedia Foundation wikis,
but needs to be overridden in the family file for any wiki that
uses a different value.
"""
return '/w'
def ssl_pathprefix(self, code):
"""The path prefix for secure.wikimedia.org access."""
# Override this ONLY if the wiki family uses a different path
# pattern than /familyname/languagecode
return "/%s/%s" % (self.name, code)
def path(self, code):
return '%s/index.php' % self.scriptpath(code)
def querypath(self, code):
return '%s/query.php' % self.scriptpath(code)
def apipath(self, code):
return '%s/api.php' % self.scriptpath(code)
def nicepath(self, code):
return '/wiki/'
def nice_get_address(self, code, title):
return '%s%s' % (self.nicepath(code), title)
    def dbName(self, code):
        """Return the name of this wiki's MySQL database (code + family name)."""
        return '%s%s' % (code, self.name)
# Which version of MediaWiki is used?
def version(self, code):
"""Return MediaWiki version number as a string."""
# Don't use this, use versionnumber() instead. This only exists
# to not break family files.
# Here we return the latest mw release for downloading
return '1.20wmf2'
def versionnumber(self, code):
"""Return an int identifying MediaWiki version.
Currently this is implemented as returning the minor version
number; i.e., 'X' in version '1.X.Y'
"""
R = re.compile(r"(\d+).(\d+)")
M = R.search(self.version(code))
if not M:
# Version string malformatted; assume it should have been 1.10
return 10
return 1000 * int(M.group(1)) + int(M.group(2)) - 1000
def code2encoding(self, code):
"""Return the encoding for a specific language wiki"""
return 'utf-8'
def code2encodings(self, code):
"""Return a list of historical encodings for a specific language
wiki"""
return self.code2encoding(code),
# aliases
def encoding(self, code):
"""Return the encoding for a specific language wiki"""
return self.code2encoding(code)
def encodings(self, code):
"""Return a list of historical encodings for a specific language
wiki"""
return self.code2encodings(code)
def __cmp__(self, otherfamily):
try:
return cmp(self.name, otherfamily.name)
except AttributeError:
return cmp(id(self), id(otherfamily))
def __hash__(self):
return hash(self.name)
def __repr__(self):
return 'Family("%s")' % self.name
def RversionTab(self, code):
"""Change this to some regular expression that shows the page we
found is an existing page, in case the normal regexp does not work.
"""
return None
def has_query_api(self, code):
"""Is query.php installed in the wiki?"""
return False
def shared_image_repository(self, code):
"""Return the shared image repository, if any."""
return (None, None)
def shared_data_repository(self, code, transcluded=False):
"""Return the shared wikidata repository, if any."""
return (None, None)
    @pywikibot.deprecated("Site.getcurrenttime()")
    def server_time(self, code):
        """Return a datetime object representing the server time.

        DEPRECATED: use Site.getcurrenttime() instead.
        """
        return pywikibot.Site(code, self).getcurrenttime()
def isPublic(self, code):
"""Does the wiki require logging in before viewing it?"""
return True
def post_get_convert(self, site, getText):
"""Does a conversion on the retrieved text from the wiki
i.e. Esperanto X-conversion """
return getText
def pre_put_convert(self, site, putText):
"""Does a conversion on the text to insert on the wiki
i.e. Esperanto X-conversion """
return putText
# Parent class for all wikimedia families
class WikimediaFamily(Family):
    """Shared defaults for families hosted by the Wikimedia Foundation."""

    def __init__(self):
        super(WikimediaFamily, self).__init__()
        # Project (4) and Help (12) namespaces support subpages on WMF wikis.
        self.namespacesWithSubpage.extend([4, 12])
        # Projects reachable via CentralAuth cross-wiki login.
        self.cross_projects = [
            'commons', 'incubator', 'mediawiki', 'meta', 'species', 'test',
            'wikibooks', 'wikidata', 'wikinews', 'wikipedia', 'wikiquote',
            'wikisource', 'wikiversity', 'wiktionary',
        ]

    def version(self, code):
        """Return the Wikimedia projects version number as a string.

        Don't use this directly; use versionnumber() instead.  This
        only exists to keep old family files working.
        """
        return '1.22wmf10'

    def shared_image_repository(self, code):
        """All WMF wikis share images through Wikimedia Commons."""
        return ('commons', 'commons')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Generate resource loading code and resource identifiers for all
files in <faint-root>/graphics>
"""
import os
import sys
import build_sys.util.util as util
# Faint cursor identifiers that map onto stock wxWidgets cursors
# instead of custom cursor graphics from the graphics/ folder.
DEFAULT_CURSORS = {"ARROW": "wxCURSOR_ARROW",
                   "CARET": "wxCURSOR_IBEAM",
                   "RESIZE_NW": "wxCURSOR_SIZENWSE",
                   "RESIZE_NE": "wxCURSOR_SIZENESW",
                   "RESIZE_WE": "wxCURSOR_SIZEWE",
                   "RESIZE_NS": "wxCURSOR_SIZENS",
                   "BLANK": "wxCURSOR_BLANK" }

# Whether the resource enumerations for icons should have a trailing
# emacs iimage-comment, so that the image is shown within Emacs
ENABLE_EMACS_IIMAGE = True
class Paths:
    """Collects every input and output path used by the generator."""

    def __init__(self, faint_root):
        self.faint_root = faint_root
        self.graphics_dir = os.path.join(faint_root, "graphics")
        self.generated_dir = os.path.join(faint_root, 'generated', 'resource')
        self.target_hh = os.path.join(self.generated_dir, "load-resources.hh")
        self.target_cpp = os.path.join(self.generated_dir, "load-resources.cpp")
        self.resource_id_hh = os.path.join(self.generated_dir, "gen-resource-id.hh")
        self.index_file = os.path.join(self.generated_dir, "resource-index.txt")
        self.resource_files = self._list_resources()

    def _list_resources(self):
        """Return the graphics files (anything with an extension), sorted."""
        names = os.listdir(self.graphics_dir)
        return sorted(name for name in names if "." in name)
def _need_generate(paths):
    """True if the generated resource files are missing or out of date."""
    if not os.path.exists(paths.generated_dir):
        return True
    if not os.path.exists(paths.index_file):
        return True
    # Compare the recorded file list with the current graphics folder;
    # any difference (addition, removal, rename) forces regeneration.
    with open(paths.index_file, 'r', newline='\n') as index_file:
        lines = index_file.read().split('\n')
    indexed = [line for line in lines
               if line and not line.startswith("#")]
    return indexed != paths.resource_files
def _write_index(paths):
    """Write the index file recording which resources were generated."""
    header = (
        '# Generated by %s\n' % os.path.basename(__file__) +
        '#\n'
        '# Used for determining if resources have changed when\n'
        '# generating resource names and initialization.\n')
    with open(paths.index_file, 'w', newline='\n') as index_file:
        index_file.write(header)
        for resource in paths.resource_files:
            index_file.write(resource + "\n")
def _strip_ext(file_name):
    """Return *file_name* up to (not including) its first dot.

    Raises ValueError when the name has no extension, instead of the
    original assert, which is silently stripped under ``python -O``.
    """
    stem, dot, _ = file_name.partition('.')
    if not dot:
        raise ValueError("File name has no extension: %r" % file_name)
    return stem
def _write_load_function(paths):
    """Generate load-resources.cpp/.hh.

    The .cpp defines load_faint_resources(), which loads every cursor
    and icon graphic plus the stock wx cursors; the .hh declares it.
    """
    with open(paths.target_cpp, 'w', newline='\n') as cpp_file:
        cpp_file.write('// Generated by %s\n' % os.path.basename(__file__))
        cpp_file.write('#include "generated/resource/gen-resource-id.hh"\n')
        cpp_file.write('#include "generated/resource/load-resources.hh"\n')
        cpp_file.write('#include "util-wx/file-path-util.hh"\n')
        cpp_file.write('\n')
        cpp_file.write('namespace faint{\n')
        cpp_file.write('void load_faint_resources(faint::Art& art){\n')
        cpp_file.write('\n')
        cpp_file.write(' // Cursors\n')
        for cursor_file in _cursor_files(paths):
            res_id = _resource_identifier(cursor_file)
            cpp_file.write(' art.Load("%s", Cursor::%s);\n' % (cursor_file, res_id))
        cpp_file.write('\n')
        cpp_file.write(' // Cursors (wx-defaults)\n')
        for cursor_id in sorted(DEFAULT_CURSORS.keys()):
            cpp_file.write(' art.Add(wxCursor(%s), Cursor::%s);\n' % (
                DEFAULT_CURSORS[cursor_id], cursor_id))
        cpp_file.write('\n')
        cpp_file.write(' // Icons\n')
        for icon_file in _icon_files(paths):
            cpp_file.write(' art.Load("%s", Icon::%s);\n' % (icon_file, _resource_identifier(icon_file)))
        cpp_file.write('}\n')
        cpp_file.write('} // namespace\n')
    # Bug fix: open with newline='\n' like the other generated files,
    # so the header does not get platform newline translation.
    with open(paths.target_hh, 'w', newline='\n') as hh_file:
        hh_file.write("// Generated by %s\n" % os.path.basename(__file__))
        hh_file.write('#ifndef %s\n' % util.create_include_guard(paths.target_hh))
        hh_file.write('#define %s\n' % util.create_include_guard(paths.target_hh))
        hh_file.write('#include "gui/art.hh"\n')
        hh_file.write("\n")
        hh_file.write('namespace faint{\n')
        hh_file.write("\n")
        hh_file.write('void load_faint_resources(Art&);\n')
        hh_file.write("\n")
        hh_file.write('} // namespace\n')
        hh_file.write("\n")
        hh_file.write('#endif\n')
# Prefixes dropped when turning a file name into a C++ identifier.
PREFIXES = ["ICON_", "CURSOR_"]

def _resource_identifier(file_path):
    """Map a resource file name to its C++ enum identifier.

    E.g. cursor-arrow.png -> ARROW (extension stripped, upper-cased,
    dashes become underscores, known prefixes removed).
    """
    name = _strip_ext(file_path).upper().replace("-", "_")
    for prefix in PREFIXES:
        if name.startswith(prefix):
            return name[len(prefix):]
    return name
def _cursor_files(paths):
    """Resource files containing cursor graphics (cursor* naming)."""
    return [name for name in paths.resource_files
            if name.startswith("cursor")]
def _icon_files(paths):
    """Resource png files that are icons (i.e. not cursor graphics)."""
    return [name for name in paths.resource_files
            if name.endswith('.png') and not name.startswith("cursor")]
def _cursor_labels(paths):
    """Enum identifiers for every cursor resource file."""
    return [_resource_identifier(name) for name in _cursor_files(paths)]
def _icon_labels(paths):
    """Return (enum identifiers, file names) for the icon resources."""
    files = _icon_files(paths)
    labels = [_resource_identifier(name) for name in files]
    return labels, files
def _emacs_iimage(path):
    """Wrap *path* in an emacs iimage marker: <path>."""
    return '<%s>' % path
def _icon_entry(indent, last, label, path, append_emacs_iimage):
    """Format one entry line of the Icon enumeration.

    Appends a comma unless *last*, and optionally an emacs
    iimage-comment pointing at the graphic.
    """
    parts = [' ' * indent, label]
    if not last:
        parts.append(",")
    if append_emacs_iimage:
        parts.append(" // <../../graphics/%s>" % path)
    parts.append('\n')
    return ''.join(parts)
def _write_header(paths):
    """Generate gen-resource-id.hh containing the Cursor and Icon enums."""
    hh_file_name = paths.resource_id_hh
    with open(hh_file_name, 'w') as hh_file:
        write = hh_file.write
        write('// Generated by %s\n' % os.path.basename(__file__))
        write('#ifndef %s\n' % util.create_include_guard(hh_file_name))
        write('#define %s\n' % util.create_include_guard(hh_file_name))
        write("\n")
        write("namespace faint{\n")
        write("\n")
        write("enum class Cursor{\n")
        # Custom cursors plus the stock wx defaults and a DONT_CARE value.
        labels = _cursor_labels(paths) + ['DONT_CARE'] + list(DEFAULT_CURSORS.keys())
        write(' ' + ',\n '.join(sorted(labels)) + '\n')
        write('};\n')
        write('\n')
        write('enum class Icon{\n')
        icon_labels, icon_paths = _icon_labels(paths)
        last_index = len(icon_labels) - 1
        for num, (label, path) in enumerate(zip(icon_labels, icon_paths)):
            write(_icon_entry(2, num == last_index, label, path,
                              ENABLE_EMACS_IIMAGE))
        write('};\n')
        write('\n')
        write("} // namespace\n")
        write('\n')
        write('#endif')
def run(faint_root, force=False):
    """Regenerate the resource loading code for *faint_root* if needed.

    With force=True, regenerate even when the index is up to date.
    """
    paths = Paths(faint_root)
    if not (force or _need_generate(paths)):
        print("* Resource-loading up to date.")
        return
    print("* Generating resource-loading.")
    if not os.path.exists(paths.generated_dir):
        os.makedirs(paths.generated_dir)
    _write_load_function(paths)
    _write_header(paths)
    _write_index(paths)
if __name__ == '__main__':
    # Standalone invocation: "--force" regenerates even when the
    # resource index says everything is up to date.  The Faint root is
    # assumed to be the parent of the directory this script runs from.
    force = "--force" in sys.argv
    faint_root = os.path.abspath("../")
    run(faint_root, force)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.